staging: greybus: move the greybus core to drivers/greybus
The Greybus core code has been stable for a long time, and has been
shipping for many years in millions of phones. With the advent of a
recent Google Summer of Code project, and a number of new devices in
the works from various companies, it is time to get the core greybus
code out of staging as it really is going to be with us for a while.

Cc: Johan Hovold <johan@kernel.org>
Cc: linux-kernel@vger.kernel.org
Cc: greybus-dev@lists.linaro.org
Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
Acked-by: Alex Elder <elder@kernel.org>
Link: https://lore.kernel.org/r/20190825055429.18547-9-gregkh@linuxfoundation.org
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
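For orientation before the diff itself: a minimal bundle driver built on the
core being moved looks roughly like the sketch below. The driver name, class
choice, and callbacks are hypothetical; struct greybus_driver,
GREYBUS_DEVICE_CLASS(), and module_greybus_driver() are the interfaces the
core exports.

/* Hedged sketch of a Greybus bundle driver (hypothetical "example" driver). */
#include <linux/greybus.h>
#include <linux/module.h>

static int example_probe(struct gb_bundle *bundle,
                         const struct greybus_bundle_id *id)
{
        /* Set up connections to the bundle's CPorts here. */
        dev_info(&bundle->dev, "bound bundle %u, class 0x%02x\n",
                 bundle->id, bundle->class);
        return 0;
}

static void example_disconnect(struct gb_bundle *bundle)
{
        /* Tear down whatever probe created. */
}

static const struct greybus_bundle_id example_id_table[] = {
        { GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_VENDOR) }, /* class is illustrative */
        { }
};
MODULE_DEVICE_TABLE(greybus, example_id_table);

static struct greybus_driver example_driver = {
        .name           = "example",
        .probe          = example_probe,
        .disconnect     = example_disconnect,
        .id_table       = example_id_table,
};
module_greybus_driver(example_driver);

MODULE_LICENSE("GPL");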
drivers/greybus/Kconfig (new regular file, 16 lines)
@@ -0,0 +1,16 @@
# SPDX-License-Identifier: GPL-2.0
menuconfig GREYBUS
        tristate "Greybus support"
        depends on SYSFS
        ---help---
          This option enables the Greybus driver core.  Greybus is a
          hardware protocol that was designed to provide UniPro with a
          sane application layer.  It was originally designed for the
          ARA project, a modular phone system, but has shown up in other
          phones, and can be tunneled over other busses in order to
          control hardware devices.

          Say Y here to enable support for these types of drivers.

          To compile this code as a module, choose M here: the module
          will be called greybus.ko
drivers/greybus/Makefile (new regular file, 19 lines)
@@ -0,0 +1,19 @@
# SPDX-License-Identifier: GPL-2.0
# Greybus core
greybus-y :=    core.o          \
                debugfs.o       \
                hd.o            \
                manifest.o      \
                module.o        \
                interface.o     \
                bundle.o        \
                connection.o    \
                control.o       \
                svc.o           \
                svc_watchdog.o  \
                operation.o

obj-$(CONFIG_GREYBUS)           += greybus.o

# needed for trace events
ccflags-y += -I$(src)
drivers/greybus/bundle.c (new regular file, 252 lines)
@@ -0,0 +1,252 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Greybus bundles
 *
 * Copyright 2014-2015 Google Inc.
 * Copyright 2014-2015 Linaro Ltd.
 */

#include <linux/greybus.h>
#include "greybus_trace.h"

static ssize_t bundle_class_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        struct gb_bundle *bundle = to_gb_bundle(dev);

        return sprintf(buf, "0x%02x\n", bundle->class);
}
static DEVICE_ATTR_RO(bundle_class);

static ssize_t bundle_id_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        struct gb_bundle *bundle = to_gb_bundle(dev);

        return sprintf(buf, "%u\n", bundle->id);
}
static DEVICE_ATTR_RO(bundle_id);

static ssize_t state_show(struct device *dev, struct device_attribute *attr,
                          char *buf)
{
        struct gb_bundle *bundle = to_gb_bundle(dev);

        if (!bundle->state)
                return sprintf(buf, "\n");

        return sprintf(buf, "%s\n", bundle->state);
}

static ssize_t state_store(struct device *dev, struct device_attribute *attr,
                           const char *buf, size_t size)
{
        struct gb_bundle *bundle = to_gb_bundle(dev);

        kfree(bundle->state);
        bundle->state = kstrdup(buf, GFP_KERNEL);
        if (!bundle->state)
                return -ENOMEM;

        /* Tell userspace that the file contents changed */
        sysfs_notify(&bundle->dev.kobj, NULL, "state");

        return size;
}
static DEVICE_ATTR_RW(state);

static struct attribute *bundle_attrs[] = {
        &dev_attr_bundle_class.attr,
        &dev_attr_bundle_id.attr,
        &dev_attr_state.attr,
        NULL,
};

ATTRIBUTE_GROUPS(bundle);

static struct gb_bundle *gb_bundle_find(struct gb_interface *intf,
                                        u8 bundle_id)
{
        struct gb_bundle *bundle;

        list_for_each_entry(bundle, &intf->bundles, links) {
                if (bundle->id == bundle_id)
                        return bundle;
        }

        return NULL;
}

static void gb_bundle_release(struct device *dev)
{
        struct gb_bundle *bundle = to_gb_bundle(dev);

        trace_gb_bundle_release(bundle);

        kfree(bundle->state);
        kfree(bundle->cport_desc);
        kfree(bundle);
}

#ifdef CONFIG_PM
static void gb_bundle_disable_all_connections(struct gb_bundle *bundle)
{
        struct gb_connection *connection;

        list_for_each_entry(connection, &bundle->connections, bundle_links)
                gb_connection_disable(connection);
}

static void gb_bundle_enable_all_connections(struct gb_bundle *bundle)
{
        struct gb_connection *connection;

        list_for_each_entry(connection, &bundle->connections, bundle_links)
                gb_connection_enable(connection);
}

static int gb_bundle_suspend(struct device *dev)
{
        struct gb_bundle *bundle = to_gb_bundle(dev);
        const struct dev_pm_ops *pm = dev->driver->pm;
        int ret;

        if (pm && pm->runtime_suspend) {
                ret = pm->runtime_suspend(&bundle->dev);
                if (ret)
                        return ret;
        } else {
                gb_bundle_disable_all_connections(bundle);
        }

        ret = gb_control_bundle_suspend(bundle->intf->control, bundle->id);
        if (ret) {
                if (pm && pm->runtime_resume)
                        ret = pm->runtime_resume(dev);
                else
                        gb_bundle_enable_all_connections(bundle);

                return ret;
        }

        return 0;
}

static int gb_bundle_resume(struct device *dev)
{
        struct gb_bundle *bundle = to_gb_bundle(dev);
        const struct dev_pm_ops *pm = dev->driver->pm;
        int ret;

        ret = gb_control_bundle_resume(bundle->intf->control, bundle->id);
        if (ret)
                return ret;

        if (pm && pm->runtime_resume) {
                ret = pm->runtime_resume(dev);
                if (ret)
                        return ret;
        } else {
                gb_bundle_enable_all_connections(bundle);
        }

        return 0;
}

static int gb_bundle_idle(struct device *dev)
{
        pm_runtime_mark_last_busy(dev);
        pm_request_autosuspend(dev);

        return 0;
}
#endif

static const struct dev_pm_ops gb_bundle_pm_ops = {
        SET_RUNTIME_PM_OPS(gb_bundle_suspend, gb_bundle_resume, gb_bundle_idle)
};

struct device_type greybus_bundle_type = {
        .name =         "greybus_bundle",
        .release =      gb_bundle_release,
        .pm =           &gb_bundle_pm_ops,
};

/*
 * Create a gb_bundle structure to represent a discovered
 * bundle.  Returns a pointer to the new bundle or a null
 * pointer if a failure occurs due to memory exhaustion.
 */
struct gb_bundle *gb_bundle_create(struct gb_interface *intf, u8 bundle_id,
                                   u8 class)
{
        struct gb_bundle *bundle;

        if (bundle_id == BUNDLE_ID_NONE) {
                dev_err(&intf->dev, "can't use bundle id %u\n", bundle_id);
                return NULL;
        }

        /*
         * Reject any attempt to reuse a bundle id.  We initialize
         * these serially, so there's no need to worry about keeping
         * the interface bundle list locked here.
         */
        if (gb_bundle_find(intf, bundle_id)) {
                dev_err(&intf->dev, "duplicate bundle id %u\n", bundle_id);
                return NULL;
        }

        bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
        if (!bundle)
                return NULL;

        bundle->intf = intf;
        bundle->id = bundle_id;
        bundle->class = class;
        INIT_LIST_HEAD(&bundle->connections);

        bundle->dev.parent = &intf->dev;
        bundle->dev.bus = &greybus_bus_type;
        bundle->dev.type = &greybus_bundle_type;
        bundle->dev.groups = bundle_groups;
        bundle->dev.dma_mask = intf->dev.dma_mask;
        device_initialize(&bundle->dev);
        dev_set_name(&bundle->dev, "%s.%d", dev_name(&intf->dev), bundle_id);

        list_add(&bundle->links, &intf->bundles);

        trace_gb_bundle_create(bundle);

        return bundle;
}

int gb_bundle_add(struct gb_bundle *bundle)
{
        int ret;

        ret = device_add(&bundle->dev);
        if (ret) {
                dev_err(&bundle->dev, "failed to register bundle: %d\n", ret);
                return ret;
        }

        trace_gb_bundle_add(bundle);

        return 0;
}

/*
 * Tear down a previously set up bundle.
 */
void gb_bundle_destroy(struct gb_bundle *bundle)
{
        trace_gb_bundle_destroy(bundle);

        if (device_is_registered(&bundle->dev))
                device_del(&bundle->dev);

        list_del(&bundle->links);

        put_device(&bundle->dev);
}
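A note on the PM hooks in this file: gb_bundle_suspend() and
gb_bundle_resume() delegate to the bound driver's dev_pm_ops when present
(read via dev->driver->pm) and otherwise fall back to disabling or enabling
every connection on the bundle. A driver opting into the delegation path
would look roughly like this sketch; the callbacks are hypothetical, and the
wiring into a greybus_driver's embedded device_driver is an assumption here:

#include <linux/greybus.h>
#include <linux/pm_runtime.h>

static int example_runtime_suspend(struct device *dev)
{
        struct gb_bundle *bundle = to_gb_bundle(dev);

        /*
         * Quiesce driver traffic and handle the bundle's connections
         * itself; the core still issues gb_control_bundle_suspend().
         */
        dev_dbg(&bundle->dev, "runtime suspend\n");
        return 0;
}

static int example_runtime_resume(struct device *dev)
{
        struct gb_bundle *bundle = to_gb_bundle(dev);

        dev_dbg(&bundle->dev, "runtime resume\n");
        return 0;
}

/* Hung off the driver so gb_bundle_suspend() finds it via dev->driver->pm. */
static const struct dev_pm_ops example_pm_ops = {
        SET_RUNTIME_PM_OPS(example_runtime_suspend, example_runtime_resume,
                           NULL)
};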
drivers/greybus/connection.c (new regular file, 942 lines)
@@ -0,0 +1,942 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Greybus connections
 *
 * Copyright 2014 Google Inc.
 * Copyright 2014 Linaro Ltd.
 */

#include <linux/workqueue.h>
#include <linux/greybus.h>

#include "greybus_trace.h"

#define GB_CONNECTION_CPORT_QUIESCE_TIMEOUT     1000

static void gb_connection_kref_release(struct kref *kref);

static DEFINE_SPINLOCK(gb_connections_lock);
static DEFINE_MUTEX(gb_connection_mutex);

/* Caller holds gb_connection_mutex. */
static bool gb_connection_cport_in_use(struct gb_interface *intf, u16 cport_id)
{
        struct gb_host_device *hd = intf->hd;
        struct gb_connection *connection;

        list_for_each_entry(connection, &hd->connections, hd_links) {
                if (connection->intf == intf &&
                    connection->intf_cport_id == cport_id)
                        return true;
        }

        return false;
}

static void gb_connection_get(struct gb_connection *connection)
{
        kref_get(&connection->kref);

        trace_gb_connection_get(connection);
}

static void gb_connection_put(struct gb_connection *connection)
{
        trace_gb_connection_put(connection);

        kref_put(&connection->kref, gb_connection_kref_release);
}

/*
 * Returns a reference-counted pointer to the connection if found.
 */
static struct gb_connection *
gb_connection_hd_find(struct gb_host_device *hd, u16 cport_id)
{
        struct gb_connection *connection;
        unsigned long flags;

        spin_lock_irqsave(&gb_connections_lock, flags);
        list_for_each_entry(connection, &hd->connections, hd_links)
                if (connection->hd_cport_id == cport_id) {
                        gb_connection_get(connection);
                        goto found;
                }
        connection = NULL;
found:
        spin_unlock_irqrestore(&gb_connections_lock, flags);

        return connection;
}

/*
 * Callback from the host driver to let us know that data has been
 * received on the bundle.
 */
void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id,
                       u8 *data, size_t length)
{
        struct gb_connection *connection;

        trace_gb_hd_in(hd);

        connection = gb_connection_hd_find(hd, cport_id);
        if (!connection) {
                dev_err(&hd->dev,
                        "nonexistent connection (%zu bytes dropped)\n", length);
                return;
        }
        gb_connection_recv(connection, data, length);
        gb_connection_put(connection);
}
EXPORT_SYMBOL_GPL(greybus_data_rcvd);

static void gb_connection_kref_release(struct kref *kref)
{
        struct gb_connection *connection;

        connection = container_of(kref, struct gb_connection, kref);

        trace_gb_connection_release(connection);

        kfree(connection);
}

static void gb_connection_init_name(struct gb_connection *connection)
{
        u16 hd_cport_id = connection->hd_cport_id;
        u16 cport_id = 0;
        u8 intf_id = 0;

        if (connection->intf) {
                intf_id = connection->intf->interface_id;
                cport_id = connection->intf_cport_id;
        }

        snprintf(connection->name, sizeof(connection->name),
                 "%u/%u:%u", hd_cport_id, intf_id, cport_id);
}

/*
 * _gb_connection_create() - create a Greybus connection
 * @hd: host device of the connection
 * @hd_cport_id: host-device cport id, or -1 for dynamic allocation
 * @intf: remote interface, or NULL for static connections
 * @bundle: remote-interface bundle (may be NULL)
 * @cport_id: remote-interface cport id, or 0 for static connections
 * @handler: request handler (may be NULL)
 * @flags: connection flags
 *
 * Create a Greybus connection, representing the bidirectional link
 * between a CPort on a (local) Greybus host device and a CPort on
 * another Greybus interface.
 *
 * A connection also maintains the state of operations sent over the
 * connection.
 *
 * Serialised against concurrent create and destroy using the
 * gb_connection_mutex.
 *
 * Return: A pointer to the new connection if successful, or an ERR_PTR
 * otherwise.
 */
static struct gb_connection *
_gb_connection_create(struct gb_host_device *hd, int hd_cport_id,
                      struct gb_interface *intf,
                      struct gb_bundle *bundle, int cport_id,
                      gb_request_handler_t handler,
                      unsigned long flags)
{
        struct gb_connection *connection;
        int ret;

        mutex_lock(&gb_connection_mutex);

        if (intf && gb_connection_cport_in_use(intf, cport_id)) {
                dev_err(&intf->dev, "cport %u already in use\n", cport_id);
                ret = -EBUSY;
                goto err_unlock;
        }

        ret = gb_hd_cport_allocate(hd, hd_cport_id, flags);
        if (ret < 0) {
                dev_err(&hd->dev, "failed to allocate cport: %d\n", ret);
                goto err_unlock;
        }
        hd_cport_id = ret;

        connection = kzalloc(sizeof(*connection), GFP_KERNEL);
        if (!connection) {
                ret = -ENOMEM;
                goto err_hd_cport_release;
        }

        connection->hd_cport_id = hd_cport_id;
        connection->intf_cport_id = cport_id;
        connection->hd = hd;
        connection->intf = intf;
        connection->bundle = bundle;
        connection->handler = handler;
        connection->flags = flags;
        if (intf && (intf->quirks & GB_INTERFACE_QUIRK_NO_CPORT_FEATURES))
                connection->flags |= GB_CONNECTION_FLAG_NO_FLOWCTRL;
        connection->state = GB_CONNECTION_STATE_DISABLED;

        atomic_set(&connection->op_cycle, 0);
        mutex_init(&connection->mutex);
        spin_lock_init(&connection->lock);
        INIT_LIST_HEAD(&connection->operations);

        connection->wq = alloc_workqueue("%s:%d", WQ_UNBOUND, 1,
                                         dev_name(&hd->dev), hd_cport_id);
        if (!connection->wq) {
                ret = -ENOMEM;
                goto err_free_connection;
        }

        kref_init(&connection->kref);

        gb_connection_init_name(connection);

        spin_lock_irq(&gb_connections_lock);
        list_add(&connection->hd_links, &hd->connections);

        if (bundle)
                list_add(&connection->bundle_links, &bundle->connections);
        else
                INIT_LIST_HEAD(&connection->bundle_links);

        spin_unlock_irq(&gb_connections_lock);

        mutex_unlock(&gb_connection_mutex);

        trace_gb_connection_create(connection);

        return connection;

err_free_connection:
        kfree(connection);
err_hd_cport_release:
        gb_hd_cport_release(hd, hd_cport_id);
err_unlock:
        mutex_unlock(&gb_connection_mutex);

        return ERR_PTR(ret);
}

struct gb_connection *
gb_connection_create_static(struct gb_host_device *hd, u16 hd_cport_id,
                            gb_request_handler_t handler)
{
        return _gb_connection_create(hd, hd_cport_id, NULL, NULL, 0, handler,
                                     GB_CONNECTION_FLAG_HIGH_PRIO);
}

struct gb_connection *
gb_connection_create_control(struct gb_interface *intf)
{
        return _gb_connection_create(intf->hd, -1, intf, NULL, 0, NULL,
                                     GB_CONNECTION_FLAG_CONTROL |
                                     GB_CONNECTION_FLAG_HIGH_PRIO);
}

struct gb_connection *
gb_connection_create(struct gb_bundle *bundle, u16 cport_id,
                     gb_request_handler_t handler)
{
        struct gb_interface *intf = bundle->intf;

        return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
                                     handler, 0);
}
EXPORT_SYMBOL_GPL(gb_connection_create);

struct gb_connection *
gb_connection_create_flags(struct gb_bundle *bundle, u16 cport_id,
                           gb_request_handler_t handler,
                           unsigned long flags)
{
        struct gb_interface *intf = bundle->intf;

        if (WARN_ON_ONCE(flags & GB_CONNECTION_FLAG_CORE_MASK))
                flags &= ~GB_CONNECTION_FLAG_CORE_MASK;

        return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
                                     handler, flags);
}
EXPORT_SYMBOL_GPL(gb_connection_create_flags);

struct gb_connection *
gb_connection_create_offloaded(struct gb_bundle *bundle, u16 cport_id,
                               unsigned long flags)
{
        flags |= GB_CONNECTION_FLAG_OFFLOADED;

        return gb_connection_create_flags(bundle, cport_id, NULL, flags);
}
EXPORT_SYMBOL_GPL(gb_connection_create_offloaded);

static int gb_connection_hd_cport_enable(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;
        int ret;

        if (!hd->driver->cport_enable)
                return 0;

        ret = hd->driver->cport_enable(hd, connection->hd_cport_id,
                                       connection->flags);
        if (ret) {
                dev_err(&hd->dev, "%s: failed to enable host cport: %d\n",
                        connection->name, ret);
                return ret;
        }

        return 0;
}

static void gb_connection_hd_cport_disable(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;
        int ret;

        if (!hd->driver->cport_disable)
                return;

        ret = hd->driver->cport_disable(hd, connection->hd_cport_id);
        if (ret) {
                dev_err(&hd->dev, "%s: failed to disable host cport: %d\n",
                        connection->name, ret);
        }
}

static int gb_connection_hd_cport_connected(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;
        int ret;

        if (!hd->driver->cport_connected)
                return 0;

        ret = hd->driver->cport_connected(hd, connection->hd_cport_id);
        if (ret) {
                dev_err(&hd->dev, "%s: failed to set connected state: %d\n",
                        connection->name, ret);
                return ret;
        }

        return 0;
}

static int gb_connection_hd_cport_flush(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;
        int ret;

        if (!hd->driver->cport_flush)
                return 0;

        ret = hd->driver->cport_flush(hd, connection->hd_cport_id);
        if (ret) {
                dev_err(&hd->dev, "%s: failed to flush host cport: %d\n",
                        connection->name, ret);
                return ret;
        }

        return 0;
}

static int gb_connection_hd_cport_quiesce(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;
        size_t peer_space;
        int ret;

        if (!hd->driver->cport_quiesce)
                return 0;

        peer_space = sizeof(struct gb_operation_msg_hdr) +
                        sizeof(struct gb_cport_shutdown_request);

        if (connection->mode_switch)
                peer_space += sizeof(struct gb_operation_msg_hdr);

        if (!hd->driver->cport_quiesce)
                return 0;

        ret = hd->driver->cport_quiesce(hd, connection->hd_cport_id,
                                        peer_space,
                                        GB_CONNECTION_CPORT_QUIESCE_TIMEOUT);
        if (ret) {
                dev_err(&hd->dev, "%s: failed to quiesce host cport: %d\n",
                        connection->name, ret);
                return ret;
        }

        return 0;
}

static int gb_connection_hd_cport_clear(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;
        int ret;

        if (!hd->driver->cport_clear)
                return 0;

        ret = hd->driver->cport_clear(hd, connection->hd_cport_id);
        if (ret) {
                dev_err(&hd->dev, "%s: failed to clear host cport: %d\n",
                        connection->name, ret);
                return ret;
        }

        return 0;
}

/*
 * Request the SVC to create a connection from AP's cport to interface's
 * cport.
 */
static int
gb_connection_svc_connection_create(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;
        struct gb_interface *intf;
        u8 cport_flags;
        int ret;

        if (gb_connection_is_static(connection))
                return 0;

        intf = connection->intf;

        /*
         * Enable either E2EFC or CSD, unless no flow control is requested.
         */
        cport_flags = GB_SVC_CPORT_FLAG_CSV_N;
        if (gb_connection_flow_control_disabled(connection)) {
                cport_flags |= GB_SVC_CPORT_FLAG_CSD_N;
        } else if (gb_connection_e2efc_enabled(connection)) {
                cport_flags |= GB_SVC_CPORT_FLAG_CSD_N |
                                GB_SVC_CPORT_FLAG_E2EFC;
        }

        ret = gb_svc_connection_create(hd->svc,
                                       hd->svc->ap_intf_id,
                                       connection->hd_cport_id,
                                       intf->interface_id,
                                       connection->intf_cport_id,
                                       cport_flags);
        if (ret) {
                dev_err(&connection->hd->dev,
                        "%s: failed to create svc connection: %d\n",
                        connection->name, ret);
                return ret;
        }

        return 0;
}

static void
gb_connection_svc_connection_destroy(struct gb_connection *connection)
{
        if (gb_connection_is_static(connection))
                return;

        gb_svc_connection_destroy(connection->hd->svc,
                                  connection->hd->svc->ap_intf_id,
                                  connection->hd_cport_id,
                                  connection->intf->interface_id,
                                  connection->intf_cport_id);
}

/* Inform Interface about active CPorts */
static int gb_connection_control_connected(struct gb_connection *connection)
{
        struct gb_control *control;
        u16 cport_id = connection->intf_cport_id;
        int ret;

        if (gb_connection_is_static(connection))
                return 0;

        if (gb_connection_is_control(connection))
                return 0;

        control = connection->intf->control;

        ret = gb_control_connected_operation(control, cport_id);
        if (ret) {
                dev_err(&connection->bundle->dev,
                        "failed to connect cport: %d\n", ret);
                return ret;
        }

        return 0;
}

static void
gb_connection_control_disconnecting(struct gb_connection *connection)
{
        struct gb_control *control;
        u16 cport_id = connection->intf_cport_id;
        int ret;

        if (gb_connection_is_static(connection))
                return;

        control = connection->intf->control;

        ret = gb_control_disconnecting_operation(control, cport_id);
        if (ret) {
                dev_err(&connection->hd->dev,
                        "%s: failed to send disconnecting: %d\n",
                        connection->name, ret);
        }
}

static void
gb_connection_control_disconnected(struct gb_connection *connection)
{
        struct gb_control *control;
        u16 cport_id = connection->intf_cport_id;
        int ret;

        if (gb_connection_is_static(connection))
                return;

        control = connection->intf->control;

        if (gb_connection_is_control(connection)) {
                if (connection->mode_switch) {
                        ret = gb_control_mode_switch_operation(control);
                        if (ret) {
                                /*
                                 * Allow mode switch to time out waiting for
                                 * mailbox event.
                                 */
                                return;
                        }
                }

                return;
        }

        ret = gb_control_disconnected_operation(control, cport_id);
        if (ret) {
                dev_warn(&connection->bundle->dev,
                         "failed to disconnect cport: %d\n", ret);
        }
}

static int gb_connection_shutdown_operation(struct gb_connection *connection,
                                            u8 phase)
{
        struct gb_cport_shutdown_request *req;
        struct gb_operation *operation;
        int ret;

        operation = gb_operation_create_core(connection,
                                             GB_REQUEST_TYPE_CPORT_SHUTDOWN,
                                             sizeof(*req), 0, 0,
                                             GFP_KERNEL);
        if (!operation)
                return -ENOMEM;

        req = operation->request->payload;
        req->phase = phase;

        ret = gb_operation_request_send_sync(operation);

        gb_operation_put(operation);

        return ret;
}

static int gb_connection_cport_shutdown(struct gb_connection *connection,
                                        u8 phase)
{
        struct gb_host_device *hd = connection->hd;
        const struct gb_hd_driver *drv = hd->driver;
        int ret;

        if (gb_connection_is_static(connection))
                return 0;

        if (gb_connection_is_offloaded(connection)) {
                if (!drv->cport_shutdown)
                        return 0;

                ret = drv->cport_shutdown(hd, connection->hd_cport_id, phase,
                                          GB_OPERATION_TIMEOUT_DEFAULT);
        } else {
                ret = gb_connection_shutdown_operation(connection, phase);
        }

        if (ret) {
                dev_err(&hd->dev, "%s: failed to send cport shutdown (phase %d): %d\n",
                        connection->name, phase, ret);
                return ret;
        }

        return 0;
}

static int
gb_connection_cport_shutdown_phase_1(struct gb_connection *connection)
{
        return gb_connection_cport_shutdown(connection, 1);
}

static int
gb_connection_cport_shutdown_phase_2(struct gb_connection *connection)
{
        return gb_connection_cport_shutdown(connection, 2);
}

/*
 * Cancel all active operations on a connection.
 *
 * Locking: Called with connection lock held and state set to DISABLED or
 * DISCONNECTING.
 */
static void gb_connection_cancel_operations(struct gb_connection *connection,
                                            int errno)
        __must_hold(&connection->lock)
{
        struct gb_operation *operation;

        while (!list_empty(&connection->operations)) {
                operation = list_last_entry(&connection->operations,
                                            struct gb_operation, links);
                gb_operation_get(operation);
                spin_unlock_irq(&connection->lock);

                if (gb_operation_is_incoming(operation))
                        gb_operation_cancel_incoming(operation, errno);
                else
                        gb_operation_cancel(operation, errno);

                gb_operation_put(operation);

                spin_lock_irq(&connection->lock);
        }
}

/*
 * Cancel all active incoming operations on a connection.
 *
 * Locking: Called with connection lock held and state set to ENABLED_TX.
 */
static void
gb_connection_flush_incoming_operations(struct gb_connection *connection,
                                        int errno)
        __must_hold(&connection->lock)
{
        struct gb_operation *operation;
        bool incoming;

        while (!list_empty(&connection->operations)) {
                incoming = false;
                list_for_each_entry(operation, &connection->operations,
                                    links) {
                        if (gb_operation_is_incoming(operation)) {
                                gb_operation_get(operation);
                                incoming = true;
                                break;
                        }
                }

                if (!incoming)
                        break;

                spin_unlock_irq(&connection->lock);

                /* FIXME: flush, not cancel? */
                gb_operation_cancel_incoming(operation, errno);
                gb_operation_put(operation);

                spin_lock_irq(&connection->lock);
        }
}

/*
 * _gb_connection_enable() - enable a connection
 * @connection: connection to enable
 * @rx: whether to enable incoming requests
 *
 * Connection-enable helper for DISABLED->ENABLED, DISABLED->ENABLED_TX, and
 * ENABLED_TX->ENABLED state transitions.
 *
 * Locking: Caller holds connection->mutex.
 */
static int _gb_connection_enable(struct gb_connection *connection, bool rx)
{
        int ret;

        /* Handle ENABLED_TX -> ENABLED transitions. */
        if (connection->state == GB_CONNECTION_STATE_ENABLED_TX) {
                if (!(connection->handler && rx))
                        return 0;

                spin_lock_irq(&connection->lock);
                connection->state = GB_CONNECTION_STATE_ENABLED;
                spin_unlock_irq(&connection->lock);

                return 0;
        }

        ret = gb_connection_hd_cport_enable(connection);
        if (ret)
                return ret;

        ret = gb_connection_svc_connection_create(connection);
        if (ret)
                goto err_hd_cport_clear;

        ret = gb_connection_hd_cport_connected(connection);
        if (ret)
                goto err_svc_connection_destroy;

        spin_lock_irq(&connection->lock);
        if (connection->handler && rx)
                connection->state = GB_CONNECTION_STATE_ENABLED;
        else
                connection->state = GB_CONNECTION_STATE_ENABLED_TX;
        spin_unlock_irq(&connection->lock);

        ret = gb_connection_control_connected(connection);
        if (ret)
                goto err_control_disconnecting;

        return 0;

err_control_disconnecting:
        spin_lock_irq(&connection->lock);
        connection->state = GB_CONNECTION_STATE_DISCONNECTING;
        gb_connection_cancel_operations(connection, -ESHUTDOWN);
        spin_unlock_irq(&connection->lock);

        /* Transmit queue should already be empty. */
        gb_connection_hd_cport_flush(connection);

        gb_connection_control_disconnecting(connection);
        gb_connection_cport_shutdown_phase_1(connection);
        gb_connection_hd_cport_quiesce(connection);
        gb_connection_cport_shutdown_phase_2(connection);
        gb_connection_control_disconnected(connection);
        connection->state = GB_CONNECTION_STATE_DISABLED;
err_svc_connection_destroy:
        gb_connection_svc_connection_destroy(connection);
err_hd_cport_clear:
        gb_connection_hd_cport_clear(connection);

        gb_connection_hd_cport_disable(connection);

        return ret;
}

int gb_connection_enable(struct gb_connection *connection)
{
        int ret = 0;

        mutex_lock(&connection->mutex);

        if (connection->state == GB_CONNECTION_STATE_ENABLED)
                goto out_unlock;

        ret = _gb_connection_enable(connection, true);
        if (!ret)
                trace_gb_connection_enable(connection);

out_unlock:
        mutex_unlock(&connection->mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(gb_connection_enable);

int gb_connection_enable_tx(struct gb_connection *connection)
{
        int ret = 0;

        mutex_lock(&connection->mutex);

        if (connection->state == GB_CONNECTION_STATE_ENABLED) {
                ret = -EINVAL;
                goto out_unlock;
        }

        if (connection->state == GB_CONNECTION_STATE_ENABLED_TX)
                goto out_unlock;

        ret = _gb_connection_enable(connection, false);
        if (!ret)
                trace_gb_connection_enable(connection);

out_unlock:
        mutex_unlock(&connection->mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(gb_connection_enable_tx);

void gb_connection_disable_rx(struct gb_connection *connection)
{
        mutex_lock(&connection->mutex);

        spin_lock_irq(&connection->lock);
        if (connection->state != GB_CONNECTION_STATE_ENABLED) {
                spin_unlock_irq(&connection->lock);
                goto out_unlock;
        }
        connection->state = GB_CONNECTION_STATE_ENABLED_TX;
        gb_connection_flush_incoming_operations(connection, -ESHUTDOWN);
        spin_unlock_irq(&connection->lock);

        trace_gb_connection_disable(connection);

out_unlock:
        mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable_rx);

void gb_connection_mode_switch_prepare(struct gb_connection *connection)
{
        connection->mode_switch = true;
}

void gb_connection_mode_switch_complete(struct gb_connection *connection)
{
        gb_connection_svc_connection_destroy(connection);
        gb_connection_hd_cport_clear(connection);

        gb_connection_hd_cport_disable(connection);

        connection->mode_switch = false;
}

void gb_connection_disable(struct gb_connection *connection)
{
        mutex_lock(&connection->mutex);

        if (connection->state == GB_CONNECTION_STATE_DISABLED)
                goto out_unlock;

        trace_gb_connection_disable(connection);

        spin_lock_irq(&connection->lock);
        connection->state = GB_CONNECTION_STATE_DISCONNECTING;
        gb_connection_cancel_operations(connection, -ESHUTDOWN);
        spin_unlock_irq(&connection->lock);

        gb_connection_hd_cport_flush(connection);

        gb_connection_control_disconnecting(connection);
        gb_connection_cport_shutdown_phase_1(connection);
        gb_connection_hd_cport_quiesce(connection);
        gb_connection_cport_shutdown_phase_2(connection);
        gb_connection_control_disconnected(connection);

        connection->state = GB_CONNECTION_STATE_DISABLED;

        /* control-connection tear down is deferred when mode switching */
        if (!connection->mode_switch) {
                gb_connection_svc_connection_destroy(connection);
                gb_connection_hd_cport_clear(connection);

                gb_connection_hd_cport_disable(connection);
        }

out_unlock:
        mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable);

/* Disable a connection without communicating with the remote end. */
void gb_connection_disable_forced(struct gb_connection *connection)
{
        mutex_lock(&connection->mutex);

        if (connection->state == GB_CONNECTION_STATE_DISABLED)
                goto out_unlock;

        trace_gb_connection_disable(connection);

        spin_lock_irq(&connection->lock);
        connection->state = GB_CONNECTION_STATE_DISABLED;
        gb_connection_cancel_operations(connection, -ESHUTDOWN);
        spin_unlock_irq(&connection->lock);

        gb_connection_hd_cport_flush(connection);

        gb_connection_svc_connection_destroy(connection);
        gb_connection_hd_cport_clear(connection);

        gb_connection_hd_cport_disable(connection);
out_unlock:
        mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable_forced);

/* Caller must have disabled the connection before destroying it. */
void gb_connection_destroy(struct gb_connection *connection)
{
        if (!connection)
                return;

        if (WARN_ON(connection->state != GB_CONNECTION_STATE_DISABLED))
                gb_connection_disable(connection);

        mutex_lock(&gb_connection_mutex);

        spin_lock_irq(&gb_connections_lock);
        list_del(&connection->bundle_links);
        list_del(&connection->hd_links);
        spin_unlock_irq(&gb_connections_lock);

        destroy_workqueue(connection->wq);

        gb_hd_cport_release(connection->hd, connection->hd_cport_id);
        connection->hd_cport_id = CPORT_ID_BAD;

        mutex_unlock(&gb_connection_mutex);

        gb_connection_put(connection);
}
EXPORT_SYMBOL_GPL(gb_connection_destroy);

void gb_connection_latency_tag_enable(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;
        int ret;

        if (!hd->driver->latency_tag_enable)
                return;

        ret = hd->driver->latency_tag_enable(hd, connection->hd_cport_id);
        if (ret) {
                dev_err(&connection->hd->dev,
                        "%s: failed to enable latency tag: %d\n",
                        connection->name, ret);
        }
}
EXPORT_SYMBOL_GPL(gb_connection_latency_tag_enable);

void gb_connection_latency_tag_disable(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;
        int ret;

        if (!hd->driver->latency_tag_disable)
                return;

        ret = hd->driver->latency_tag_disable(hd, connection->hd_cport_id);
        if (ret) {
                dev_err(&connection->hd->dev,
                        "%s: failed to disable latency tag: %d\n",
                        connection->name, ret);
        }
}
EXPORT_SYMBOL_GPL(gb_connection_latency_tag_disable);
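The exported functions above combine into a simple lifecycle for a bundle
driver: create a connection against one of the bundle's CPorts, enable it,
and mirror that on the way down. A sketch of that usage follows; the cport
id, handler, and helper names are hypothetical:

#include <linux/greybus.h>

static int example_request_handler(struct gb_operation *op)
{
        /* Called for incoming requests once RX is enabled. */
        return 0;
}

static int example_connection_init(struct gb_bundle *bundle, u16 cport_id)
{
        struct gb_connection *connection;
        int ret;

        connection = gb_connection_create(bundle, cport_id,
                                          example_request_handler);
        if (IS_ERR(connection))
                return PTR_ERR(connection);

        ret = gb_connection_enable(connection);         /* TX and RX */
        if (ret) {
                gb_connection_destroy(connection);
                return ret;
        }

        return 0;
}

static void example_connection_exit(struct gb_connection *connection)
{
        gb_connection_disable(connection);      /* required before destroy */
        gb_connection_destroy(connection);
}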
drivers/greybus/control.c (new regular file, 584 lines)
@@ -0,0 +1,584 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Greybus CPort control protocol.
 *
 * Copyright 2015 Google Inc.
 * Copyright 2015 Linaro Ltd.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/greybus.h>

/* Highest control-protocol version supported */
#define GB_CONTROL_VERSION_MAJOR        0
#define GB_CONTROL_VERSION_MINOR        1

static int gb_control_get_version(struct gb_control *control)
{
        struct gb_interface *intf = control->connection->intf;
        struct gb_control_version_request request;
        struct gb_control_version_response response;
        int ret;

        request.major = GB_CONTROL_VERSION_MAJOR;
        request.minor = GB_CONTROL_VERSION_MINOR;

        ret = gb_operation_sync(control->connection,
                                GB_CONTROL_TYPE_VERSION,
                                &request, sizeof(request), &response,
                                sizeof(response));
        if (ret) {
                dev_err(&intf->dev,
                        "failed to get control-protocol version: %d\n",
                        ret);
                return ret;
        }

        if (response.major > request.major) {
                dev_err(&intf->dev,
                        "unsupported major control-protocol version (%u > %u)\n",
                        response.major, request.major);
                return -ENOTSUPP;
        }

        control->protocol_major = response.major;
        control->protocol_minor = response.minor;

        dev_dbg(&intf->dev, "%s - %u.%u\n", __func__, response.major,
                response.minor);

        return 0;
}

static int gb_control_get_bundle_version(struct gb_control *control,
                                         struct gb_bundle *bundle)
{
        struct gb_interface *intf = control->connection->intf;
        struct gb_control_bundle_version_request request;
        struct gb_control_bundle_version_response response;
        int ret;

        request.bundle_id = bundle->id;

        ret = gb_operation_sync(control->connection,
                                GB_CONTROL_TYPE_BUNDLE_VERSION,
                                &request, sizeof(request),
                                &response, sizeof(response));
        if (ret) {
                dev_err(&intf->dev,
                        "failed to get bundle %u class version: %d\n",
                        bundle->id, ret);
                return ret;
        }

        bundle->class_major = response.major;
        bundle->class_minor = response.minor;

        dev_dbg(&intf->dev, "%s - %u: %u.%u\n", __func__, bundle->id,
                response.major, response.minor);

        return 0;
}

int gb_control_get_bundle_versions(struct gb_control *control)
{
        struct gb_interface *intf = control->connection->intf;
        struct gb_bundle *bundle;
        int ret;

        if (!control->has_bundle_version)
                return 0;

        list_for_each_entry(bundle, &intf->bundles, links) {
                ret = gb_control_get_bundle_version(control, bundle);
                if (ret)
                        return ret;
        }

        return 0;
}

/* Get Manifest's size from the interface */
int gb_control_get_manifest_size_operation(struct gb_interface *intf)
{
        struct gb_control_get_manifest_size_response response;
        struct gb_connection *connection = intf->control->connection;
        int ret;

        ret = gb_operation_sync(connection, GB_CONTROL_TYPE_GET_MANIFEST_SIZE,
                                NULL, 0, &response, sizeof(response));
        if (ret) {
                dev_err(&connection->intf->dev,
                        "failed to get manifest size: %d\n", ret);
                return ret;
        }

        return le16_to_cpu(response.size);
}

/* Reads Manifest from the interface */
int gb_control_get_manifest_operation(struct gb_interface *intf, void *manifest,
                                      size_t size)
{
        struct gb_connection *connection = intf->control->connection;

        return gb_operation_sync(connection, GB_CONTROL_TYPE_GET_MANIFEST,
                                 NULL, 0, manifest, size);
}

int gb_control_connected_operation(struct gb_control *control, u16 cport_id)
{
        struct gb_control_connected_request request;

        request.cport_id = cpu_to_le16(cport_id);
        return gb_operation_sync(control->connection, GB_CONTROL_TYPE_CONNECTED,
                                 &request, sizeof(request), NULL, 0);
}

int gb_control_disconnected_operation(struct gb_control *control, u16 cport_id)
{
        struct gb_control_disconnected_request request;

        request.cport_id = cpu_to_le16(cport_id);
        return gb_operation_sync(control->connection,
                                 GB_CONTROL_TYPE_DISCONNECTED, &request,
                                 sizeof(request), NULL, 0);
}

int gb_control_disconnecting_operation(struct gb_control *control,
                                       u16 cport_id)
{
        struct gb_control_disconnecting_request *request;
        struct gb_operation *operation;
        int ret;

        operation = gb_operation_create_core(control->connection,
                                             GB_CONTROL_TYPE_DISCONNECTING,
                                             sizeof(*request), 0, 0,
                                             GFP_KERNEL);
        if (!operation)
                return -ENOMEM;

        request = operation->request->payload;
        request->cport_id = cpu_to_le16(cport_id);

        ret = gb_operation_request_send_sync(operation);
        if (ret) {
                dev_err(&control->dev, "failed to send disconnecting: %d\n",
                        ret);
        }

        gb_operation_put(operation);

        return ret;
}

int gb_control_mode_switch_operation(struct gb_control *control)
{
        struct gb_operation *operation;
        int ret;

        operation = gb_operation_create_core(control->connection,
                                             GB_CONTROL_TYPE_MODE_SWITCH,
                                             0, 0,
                                             GB_OPERATION_FLAG_UNIDIRECTIONAL,
                                             GFP_KERNEL);
        if (!operation)
                return -ENOMEM;

        ret = gb_operation_request_send_sync(operation);
        if (ret)
                dev_err(&control->dev, "failed to send mode switch: %d\n", ret);

        gb_operation_put(operation);

        return ret;
}

static int gb_control_bundle_pm_status_map(u8 status)
{
        switch (status) {
        case GB_CONTROL_BUNDLE_PM_INVAL:
                return -EINVAL;
        case GB_CONTROL_BUNDLE_PM_BUSY:
                return -EBUSY;
        case GB_CONTROL_BUNDLE_PM_NA:
                return -ENOMSG;
        case GB_CONTROL_BUNDLE_PM_FAIL:
        default:
                return -EREMOTEIO;
        }
}

int gb_control_bundle_suspend(struct gb_control *control, u8 bundle_id)
{
        struct gb_control_bundle_pm_request request;
        struct gb_control_bundle_pm_response response;
        int ret;

        request.bundle_id = bundle_id;
        ret = gb_operation_sync(control->connection,
                                GB_CONTROL_TYPE_BUNDLE_SUSPEND, &request,
                                sizeof(request), &response, sizeof(response));
        if (ret) {
                dev_err(&control->dev, "failed to send bundle %u suspend: %d\n",
                        bundle_id, ret);
                return ret;
        }

        if (response.status != GB_CONTROL_BUNDLE_PM_OK) {
                dev_err(&control->dev, "failed to suspend bundle %u: %d\n",
                        bundle_id, response.status);
                return gb_control_bundle_pm_status_map(response.status);
        }

        return 0;
}

int gb_control_bundle_resume(struct gb_control *control, u8 bundle_id)
{
        struct gb_control_bundle_pm_request request;
        struct gb_control_bundle_pm_response response;
        int ret;

        request.bundle_id = bundle_id;
        ret = gb_operation_sync(control->connection,
                                GB_CONTROL_TYPE_BUNDLE_RESUME, &request,
                                sizeof(request), &response, sizeof(response));
        if (ret) {
                dev_err(&control->dev, "failed to send bundle %u resume: %d\n",
                        bundle_id, ret);
                return ret;
        }

        if (response.status != GB_CONTROL_BUNDLE_PM_OK) {
                dev_err(&control->dev, "failed to resume bundle %u: %d\n",
                        bundle_id, response.status);
                return gb_control_bundle_pm_status_map(response.status);
        }

        return 0;
}

int gb_control_bundle_deactivate(struct gb_control *control, u8 bundle_id)
{
        struct gb_control_bundle_pm_request request;
        struct gb_control_bundle_pm_response response;
        int ret;

        request.bundle_id = bundle_id;
        ret = gb_operation_sync(control->connection,
                                GB_CONTROL_TYPE_BUNDLE_DEACTIVATE, &request,
                                sizeof(request), &response, sizeof(response));
        if (ret) {
                dev_err(&control->dev,
                        "failed to send bundle %u deactivate: %d\n", bundle_id,
                        ret);
                return ret;
        }

        if (response.status != GB_CONTROL_BUNDLE_PM_OK) {
                dev_err(&control->dev, "failed to deactivate bundle %u: %d\n",
                        bundle_id, response.status);
                return gb_control_bundle_pm_status_map(response.status);
        }

        return 0;
}

int gb_control_bundle_activate(struct gb_control *control, u8 bundle_id)
{
        struct gb_control_bundle_pm_request request;
        struct gb_control_bundle_pm_response response;
        int ret;

        if (!control->has_bundle_activate)
                return 0;

        request.bundle_id = bundle_id;
        ret = gb_operation_sync(control->connection,
                                GB_CONTROL_TYPE_BUNDLE_ACTIVATE, &request,
                                sizeof(request), &response, sizeof(response));
        if (ret) {
                dev_err(&control->dev,
                        "failed to send bundle %u activate: %d\n", bundle_id,
                        ret);
                return ret;
        }

        if (response.status != GB_CONTROL_BUNDLE_PM_OK) {
                dev_err(&control->dev, "failed to activate bundle %u: %d\n",
                        bundle_id, response.status);
                return gb_control_bundle_pm_status_map(response.status);
        }

        return 0;
}

static int gb_control_interface_pm_status_map(u8 status)
{
        switch (status) {
        case GB_CONTROL_INTF_PM_BUSY:
                return -EBUSY;
        case GB_CONTROL_INTF_PM_NA:
                return -ENOMSG;
        default:
                return -EREMOTEIO;
        }
}

int gb_control_interface_suspend_prepare(struct gb_control *control)
{
        struct gb_control_intf_pm_response response;
        int ret;

        ret = gb_operation_sync(control->connection,
                                GB_CONTROL_TYPE_INTF_SUSPEND_PREPARE, NULL, 0,
                                &response, sizeof(response));
        if (ret) {
                dev_err(&control->dev,
                        "failed to send interface suspend prepare: %d\n", ret);
                return ret;
        }

        if (response.status != GB_CONTROL_INTF_PM_OK) {
                dev_err(&control->dev, "interface error while preparing suspend: %d\n",
                        response.status);
                return gb_control_interface_pm_status_map(response.status);
        }

        return 0;
}

int gb_control_interface_deactivate_prepare(struct gb_control *control)
{
        struct gb_control_intf_pm_response response;
        int ret;

        ret = gb_operation_sync(control->connection,
                                GB_CONTROL_TYPE_INTF_DEACTIVATE_PREPARE, NULL,
                                0, &response, sizeof(response));
        if (ret) {
                dev_err(&control->dev, "failed to send interface deactivate prepare: %d\n",
                        ret);
                return ret;
        }

        if (response.status != GB_CONTROL_INTF_PM_OK) {
                dev_err(&control->dev, "interface error while preparing deactivate: %d\n",
                        response.status);
                return gb_control_interface_pm_status_map(response.status);
        }

        return 0;
}

int gb_control_interface_hibernate_abort(struct gb_control *control)
{
        struct gb_control_intf_pm_response response;
        int ret;

        ret = gb_operation_sync(control->connection,
                                GB_CONTROL_TYPE_INTF_HIBERNATE_ABORT, NULL, 0,
                                &response, sizeof(response));
        if (ret) {
                dev_err(&control->dev,
                        "failed to send interface aborting hibernate: %d\n",
                        ret);
                return ret;
        }

        if (response.status != GB_CONTROL_INTF_PM_OK) {
                dev_err(&control->dev, "interface error while aborting hibernate: %d\n",
                        response.status);
                return gb_control_interface_pm_status_map(response.status);
        }

        return 0;
}

static ssize_t vendor_string_show(struct device *dev,
                                  struct device_attribute *attr, char *buf)
{
        struct gb_control *control = to_gb_control(dev);

        return scnprintf(buf, PAGE_SIZE, "%s\n", control->vendor_string);
}
static DEVICE_ATTR_RO(vendor_string);

static ssize_t product_string_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        struct gb_control *control = to_gb_control(dev);

        return scnprintf(buf, PAGE_SIZE, "%s\n", control->product_string);
}
static DEVICE_ATTR_RO(product_string);

static struct attribute *control_attrs[] = {
        &dev_attr_vendor_string.attr,
        &dev_attr_product_string.attr,
        NULL,
};
ATTRIBUTE_GROUPS(control);

static void gb_control_release(struct device *dev)
{
        struct gb_control *control = to_gb_control(dev);

        gb_connection_destroy(control->connection);

        kfree(control->vendor_string);
        kfree(control->product_string);

        kfree(control);
}

struct device_type greybus_control_type = {
        .name =         "greybus_control",
        .release =      gb_control_release,
};

struct gb_control *gb_control_create(struct gb_interface *intf)
{
        struct gb_connection *connection;
        struct gb_control *control;

        control = kzalloc(sizeof(*control), GFP_KERNEL);
        if (!control)
                return ERR_PTR(-ENOMEM);

        control->intf = intf;

        connection = gb_connection_create_control(intf);
        if (IS_ERR(connection)) {
                dev_err(&intf->dev,
                        "failed to create control connection: %ld\n",
                        PTR_ERR(connection));
                kfree(control);
                return ERR_CAST(connection);
        }

        control->connection = connection;

        control->dev.parent = &intf->dev;
        control->dev.bus = &greybus_bus_type;
        control->dev.type = &greybus_control_type;
        control->dev.groups = control_groups;
        control->dev.dma_mask = intf->dev.dma_mask;
        device_initialize(&control->dev);
        dev_set_name(&control->dev, "%s.ctrl", dev_name(&intf->dev));

        gb_connection_set_data(control->connection, control);

        return control;
}

int gb_control_enable(struct gb_control *control)
{
        int ret;

        dev_dbg(&control->connection->intf->dev, "%s\n", __func__);

        ret = gb_connection_enable_tx(control->connection);
        if (ret) {
                dev_err(&control->connection->intf->dev,
                        "failed to enable control connection: %d\n",
                        ret);
                return ret;
        }

        ret = gb_control_get_version(control);
        if (ret)
                goto err_disable_connection;

        if (control->protocol_major > 0 || control->protocol_minor > 1)
                control->has_bundle_version = true;

        /* FIXME: use protocol version instead */
        if (!(control->intf->quirks & GB_INTERFACE_QUIRK_NO_BUNDLE_ACTIVATE))
                control->has_bundle_activate = true;

        return 0;

err_disable_connection:
        gb_connection_disable(control->connection);

        return ret;
}

void gb_control_disable(struct gb_control *control)
{
        dev_dbg(&control->connection->intf->dev, "%s\n", __func__);

        if (control->intf->disconnected)
                gb_connection_disable_forced(control->connection);
        else
                gb_connection_disable(control->connection);
}

int gb_control_suspend(struct gb_control *control)
{
        gb_connection_disable(control->connection);

        return 0;
}

int gb_control_resume(struct gb_control *control)
{
        int ret;

        ret = gb_connection_enable_tx(control->connection);
        if (ret) {
                dev_err(&control->connection->intf->dev,
                        "failed to enable control connection: %d\n", ret);
                return ret;
        }

        return 0;
}

int gb_control_add(struct gb_control *control)
{
        int ret;

        ret = device_add(&control->dev);
        if (ret) {
                dev_err(&control->dev,
                        "failed to register control device: %d\n",
                        ret);
                return ret;
        }

        return 0;
}

void gb_control_del(struct gb_control *control)
{
        if (device_is_registered(&control->dev))
                device_del(&control->dev);
}

struct gb_control *gb_control_get(struct gb_control *control)
{
        get_device(&control->dev);

        return control;
}

void gb_control_put(struct gb_control *control)
{
        put_device(&control->dev);
}

void gb_control_mode_switch_prepare(struct gb_control *control)
{
        gb_connection_mode_switch_prepare(control->connection);
}

void gb_control_mode_switch_complete(struct gb_control *control)
{
        gb_connection_mode_switch_complete(control->connection);
}
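Nearly every function in this file follows the same pattern: build a
little-endian request struct, call gb_operation_sync(), then check the
response status. Distilled into a hedged sketch, with a made-up operation
type and payload (only gb_operation_sync() and the endianness helpers are
real interfaces here):

#include <linux/greybus.h>

struct example_echo_request {
        __le16 value;
} __packed;

struct example_echo_response {
        __le16 value;
} __packed;

static int example_echo(struct gb_connection *connection, u16 value)
{
        struct example_echo_request request;
        struct example_echo_response response;
        int ret;

        request.value = cpu_to_le16(value);

        /* 0x7f is a placeholder operation type, not a real protocol value. */
        ret = gb_operation_sync(connection, 0x7f,
                                &request, sizeof(request),
                                &response, sizeof(response));
        if (ret)
                return ret;

        return le16_to_cpu(response.value);
}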
drivers/greybus/core.c (new regular file, 349 lines)
@@ -0,0 +1,349 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Greybus "Core"
 *
 * Copyright 2014-2015 Google Inc.
 * Copyright 2014-2015 Linaro Ltd.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define CREATE_TRACE_POINTS
#include <linux/greybus.h>
#include "greybus_trace.h"

#define GB_BUNDLE_AUTOSUSPEND_MS	3000

/* Allow greybus to be disabled at boot if needed */
static bool nogreybus;
#ifdef MODULE
module_param(nogreybus, bool, 0444);
#else
core_param(nogreybus, nogreybus, bool, 0444);
#endif
int greybus_disabled(void)
{
	return nogreybus;
}
EXPORT_SYMBOL_GPL(greybus_disabled);

static bool greybus_match_one_id(struct gb_bundle *bundle,
				 const struct greybus_bundle_id *id)
{
	if ((id->match_flags & GREYBUS_ID_MATCH_VENDOR) &&
	    (id->vendor != bundle->intf->vendor_id))
		return false;

	if ((id->match_flags & GREYBUS_ID_MATCH_PRODUCT) &&
	    (id->product != bundle->intf->product_id))
		return false;

	if ((id->match_flags & GREYBUS_ID_MATCH_CLASS) &&
	    (id->class != bundle->class))
		return false;

	return true;
}

static const struct greybus_bundle_id *
greybus_match_id(struct gb_bundle *bundle, const struct greybus_bundle_id *id)
{
	if (!id)
		return NULL;

	for (; id->vendor || id->product || id->class || id->driver_info;
	     id++) {
		if (greybus_match_one_id(bundle, id))
			return id;
	}

	return NULL;
}

static int greybus_match_device(struct device *dev, struct device_driver *drv)
{
	struct greybus_driver *driver = to_greybus_driver(drv);
	struct gb_bundle *bundle;
	const struct greybus_bundle_id *id;

	if (!is_gb_bundle(dev))
		return 0;

	bundle = to_gb_bundle(dev);

	id = greybus_match_id(bundle, driver->id_table);
	if (id)
		return 1;
	/* FIXME - Dynamic ids? */
	return 0;
}

static int greybus_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct gb_host_device *hd;
	struct gb_module *module = NULL;
	struct gb_interface *intf = NULL;
	struct gb_control *control = NULL;
	struct gb_bundle *bundle = NULL;
	struct gb_svc *svc = NULL;

	if (is_gb_host_device(dev)) {
		hd = to_gb_host_device(dev);
	} else if (is_gb_module(dev)) {
		module = to_gb_module(dev);
		hd = module->hd;
	} else if (is_gb_interface(dev)) {
		intf = to_gb_interface(dev);
		module = intf->module;
		hd = intf->hd;
	} else if (is_gb_control(dev)) {
		control = to_gb_control(dev);
		intf = control->intf;
		module = intf->module;
		hd = intf->hd;
	} else if (is_gb_bundle(dev)) {
		bundle = to_gb_bundle(dev);
		intf = bundle->intf;
		module = intf->module;
		hd = intf->hd;
	} else if (is_gb_svc(dev)) {
		svc = to_gb_svc(dev);
		hd = svc->hd;
	} else {
		dev_WARN(dev, "uevent for unknown greybus device \"type\"!\n");
		return -EINVAL;
	}

	if (add_uevent_var(env, "BUS=%u", hd->bus_id))
		return -ENOMEM;

	if (module) {
		if (add_uevent_var(env, "MODULE=%u", module->module_id))
			return -ENOMEM;
	}

	if (intf) {
		if (add_uevent_var(env, "INTERFACE=%u", intf->interface_id))
			return -ENOMEM;
		if (add_uevent_var(env, "GREYBUS_ID=%08x/%08x",
				   intf->vendor_id, intf->product_id))
			return -ENOMEM;
	}

	if (bundle) {
		// FIXME
		// add a uevent that can "load" a bundle type
		// This is what we need to bind a driver to so use the info
		// in gmod here as well

		if (add_uevent_var(env, "BUNDLE=%u", bundle->id))
			return -ENOMEM;
		if (add_uevent_var(env, "BUNDLE_CLASS=%02x", bundle->class))
			return -ENOMEM;
	}

	return 0;
}

static void greybus_shutdown(struct device *dev)
{
	if (is_gb_host_device(dev)) {
		struct gb_host_device *hd;

		hd = to_gb_host_device(dev);
		gb_hd_shutdown(hd);
	}
}

struct bus_type greybus_bus_type = {
	.name =		"greybus",
	.match =	greybus_match_device,
	.uevent =	greybus_uevent,
	.shutdown =	greybus_shutdown,
};

static int greybus_probe(struct device *dev)
{
	struct greybus_driver *driver = to_greybus_driver(dev->driver);
	struct gb_bundle *bundle = to_gb_bundle(dev);
	const struct greybus_bundle_id *id;
	int retval;

	/* match id */
	id = greybus_match_id(bundle, driver->id_table);
	if (!id)
		return -ENODEV;

	retval = pm_runtime_get_sync(&bundle->intf->dev);
	if (retval < 0) {
		pm_runtime_put_noidle(&bundle->intf->dev);
		return retval;
	}

	retval = gb_control_bundle_activate(bundle->intf->control, bundle->id);
	if (retval) {
		pm_runtime_put(&bundle->intf->dev);
		return retval;
	}

	/*
	 * Unbound bundle devices are always deactivated. During probe, the
	 * Runtime PM is set to enabled and active and the usage count is
	 * incremented. If the driver supports runtime PM, it should call
	 * pm_runtime_put() in its probe routine and pm_runtime_get_sync()
	 * in its remove routine.
	 */
	pm_runtime_set_autosuspend_delay(dev, GB_BUNDLE_AUTOSUSPEND_MS);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_get_noresume(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	retval = driver->probe(bundle, id);
	if (retval) {
		/*
		 * Catch buggy drivers that fail to destroy their connections.
		 */
		WARN_ON(!list_empty(&bundle->connections));

		gb_control_bundle_deactivate(bundle->intf->control, bundle->id);

		pm_runtime_disable(dev);
		pm_runtime_set_suspended(dev);
		pm_runtime_put_noidle(dev);
		pm_runtime_dont_use_autosuspend(dev);
		pm_runtime_put(&bundle->intf->dev);

		return retval;
	}

	pm_runtime_put(&bundle->intf->dev);

	return 0;
}

static int greybus_remove(struct device *dev)
{
	struct greybus_driver *driver = to_greybus_driver(dev->driver);
	struct gb_bundle *bundle = to_gb_bundle(dev);
	struct gb_connection *connection;
	int retval;

	retval = pm_runtime_get_sync(dev);
	if (retval < 0)
		dev_err(dev, "failed to resume bundle: %d\n", retval);

	/*
	 * Disable (non-offloaded) connections early in case the interface is
	 * already gone to avoid unnecessary operation timeouts during
	 * driver disconnect. Otherwise, only disable incoming requests.
	 */
	list_for_each_entry(connection, &bundle->connections, bundle_links) {
		if (gb_connection_is_offloaded(connection))
			continue;

		if (bundle->intf->disconnected)
			gb_connection_disable_forced(connection);
		else
			gb_connection_disable_rx(connection);
	}

	driver->disconnect(bundle);

	/* Catch buggy drivers that fail to destroy their connections. */
	WARN_ON(!list_empty(&bundle->connections));

	if (!bundle->intf->disconnected)
		gb_control_bundle_deactivate(bundle->intf->control, bundle->id);

	pm_runtime_put_noidle(dev);
	pm_runtime_disable(dev);
	pm_runtime_set_suspended(dev);
	pm_runtime_dont_use_autosuspend(dev);
	pm_runtime_put_noidle(dev);

	return 0;
}

int greybus_register_driver(struct greybus_driver *driver, struct module *owner,
			    const char *mod_name)
{
	int retval;

	if (greybus_disabled())
		return -ENODEV;

	driver->driver.bus = &greybus_bus_type;
	driver->driver.name = driver->name;
	driver->driver.probe = greybus_probe;
	driver->driver.remove = greybus_remove;
	driver->driver.owner = owner;
	driver->driver.mod_name = mod_name;

	retval = driver_register(&driver->driver);
	if (retval)
		return retval;

	pr_info("registered new driver %s\n", driver->name);
	return 0;
}
EXPORT_SYMBOL_GPL(greybus_register_driver);

void greybus_deregister_driver(struct greybus_driver *driver)
{
	driver_unregister(&driver->driver);
}
EXPORT_SYMBOL_GPL(greybus_deregister_driver);

static int __init gb_init(void)
{
	int retval;

	if (greybus_disabled())
		return -ENODEV;

	BUILD_BUG_ON(CPORT_ID_MAX >= (long)CPORT_ID_BAD);

	gb_debugfs_init();

	retval = bus_register(&greybus_bus_type);
	if (retval) {
		pr_err("bus_register failed (%d)\n", retval);
		goto error_bus;
	}

	retval = gb_hd_init();
	if (retval) {
		pr_err("gb_hd_init failed (%d)\n", retval);
		goto error_hd;
	}

	retval = gb_operation_init();
	if (retval) {
		pr_err("gb_operation_init failed (%d)\n", retval);
		goto error_operation;
	}
	return 0;	/* Success */

error_operation:
	gb_hd_exit();
error_hd:
	bus_unregister(&greybus_bus_type);
error_bus:
	gb_debugfs_cleanup();

	return retval;
}
module_init(gb_init);

static void __exit gb_exit(void)
{
	gb_operation_exit();
	gb_hd_exit();
	bus_unregister(&greybus_bus_type);
	gb_debugfs_cleanup();
	tracepoint_synchronize_unregister();
}
module_exit(gb_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Greg Kroah-Hartman <gregkh@linuxfoundation.org>");
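For out-of-tree consumers, the registration path above boils down to filling in a struct greybus_driver and handing it to greybus_register_driver(). A minimal sketch follows, assuming the GREYBUS_DEVICE_CLASS() and module_greybus_driver() convenience macros from the greybus headers; the "example" names and the class value 0x5a are illustrative placeholders, not part of this patch:

	/* Hypothetical minimal bundle driver built on the core above. */
	#include <linux/module.h>
	#include <linux/greybus.h>

	static int example_probe(struct gb_bundle *bundle,
				 const struct greybus_bundle_id *id)
	{
		dev_info(&bundle->dev, "bound bundle %u (class 0x%02x)\n",
			 bundle->id, bundle->class);
		return 0;	/* connections would be set up here */
	}

	static void example_disconnect(struct gb_bundle *bundle)
	{
		/* all connections must be destroyed before returning,
		 * or greybus_remove() above will WARN */
	}

	static const struct greybus_bundle_id example_id_table[] = {
		{ GREYBUS_DEVICE_CLASS(0x5a) },	/* placeholder class */
		{ }
	};

	static struct greybus_driver example_driver = {
		.name		= "example",
		.probe		= example_probe,
		.disconnect	= example_disconnect,
		.id_table	= example_id_table,
	};
	module_greybus_driver(example_driver);
	MODULE_LICENSE("GPL v2");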
29	drivers/greybus/debugfs.c (regular file)
@@ -0,0 +1,29 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Greybus debugfs code
 *
 * Copyright 2014 Google Inc.
 * Copyright 2014 Linaro Ltd.
 */

#include <linux/debugfs.h>
#include <linux/greybus.h>

static struct dentry *gb_debug_root;

void __init gb_debugfs_init(void)
{
	gb_debug_root = debugfs_create_dir("greybus", NULL);
}

void gb_debugfs_cleanup(void)
{
	debugfs_remove_recursive(gb_debug_root);
	gb_debug_root = NULL;
}

struct dentry *gb_debugfs_get(void)
{
	return gb_debug_root;
}
EXPORT_SYMBOL_GPL(gb_debugfs_get);
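The root dentry exported by gb_debugfs_get() is the anchor that other greybus drivers hang their own entries off. A short sketch, where the "example" directory name and the u32 variable are purely illustrative:

	static struct dentry *example_dir;
	static u32 example_counter;

	static void example_debugfs_setup(void)
	{
		/* nests under /sys/kernel/debug/greybus/ created above */
		example_dir = debugfs_create_dir("example", gb_debugfs_get());
		debugfs_create_u32("counter", 0644, example_dir,
				   &example_counter);
	}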
502	drivers/greybus/greybus_trace.h (regular file)
@@ -0,0 +1,502 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Greybus driver and device API
 *
 * Copyright 2015 Google Inc.
 * Copyright 2015 Linaro Ltd.
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM greybus

#if !defined(_TRACE_GREYBUS_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_GREYBUS_H

#include <linux/tracepoint.h>

struct gb_message;
struct gb_operation;
struct gb_connection;
struct gb_bundle;
struct gb_host_device;

DECLARE_EVENT_CLASS(gb_message,

	TP_PROTO(struct gb_message *message),

	TP_ARGS(message),

	TP_STRUCT__entry(
		__field(u16, size)
		__field(u16, operation_id)
		__field(u8, type)
		__field(u8, result)
	),

	TP_fast_assign(
		__entry->size = le16_to_cpu(message->header->size);
		__entry->operation_id =
			le16_to_cpu(message->header->operation_id);
		__entry->type = message->header->type;
		__entry->result = message->header->result;
	),

	TP_printk("size=%hu operation_id=0x%04x type=0x%02x result=0x%02x",
		  __entry->size, __entry->operation_id,
		  __entry->type, __entry->result)
);

#define DEFINE_MESSAGE_EVENT(name)					\
		DEFINE_EVENT(gb_message, name,				\
				TP_PROTO(struct gb_message *message),	\
				TP_ARGS(message))

/*
 * Occurs immediately before calling a host device's message_send()
 * method.
 */
DEFINE_MESSAGE_EVENT(gb_message_send);

/*
 * Occurs after an incoming request message has been received
 */
DEFINE_MESSAGE_EVENT(gb_message_recv_request);

/*
 * Occurs after an incoming response message has been received,
 * after its matching request has been found.
 */
DEFINE_MESSAGE_EVENT(gb_message_recv_response);

/*
 * Occurs after an operation has been canceled, possibly before the
 * cancellation is complete.
 */
DEFINE_MESSAGE_EVENT(gb_message_cancel_outgoing);

/*
 * Occurs when an incoming request is cancelled; if the response has
 * been queued for sending, this occurs after it is sent.
 */
DEFINE_MESSAGE_EVENT(gb_message_cancel_incoming);

/*
 * Occurs in the host driver message_send() function just prior to
 * handing off the data to be processed by hardware.
 */
DEFINE_MESSAGE_EVENT(gb_message_submit);

#undef DEFINE_MESSAGE_EVENT

DECLARE_EVENT_CLASS(gb_operation,

	TP_PROTO(struct gb_operation *operation),

	TP_ARGS(operation),

	TP_STRUCT__entry(
		__field(u16, cport_id)	/* CPort of HD side of connection */
		__field(u16, id)	/* Operation ID */
		__field(u8, type)
		__field(unsigned long, flags)
		__field(int, active)
		__field(int, waiters)
		__field(int, errno)
	),

	TP_fast_assign(
		__entry->cport_id = operation->connection->hd_cport_id;
		__entry->id = operation->id;
		__entry->type = operation->type;
		__entry->flags = operation->flags;
		__entry->active = operation->active;
		__entry->waiters = atomic_read(&operation->waiters);
		__entry->errno = operation->errno;
	),

	TP_printk("id=%04x type=0x%02x cport_id=%04x flags=0x%lx active=%d waiters=%d errno=%d",
		  __entry->id, __entry->cport_id, __entry->type, __entry->flags,
		  __entry->active, __entry->waiters, __entry->errno)
);

#define DEFINE_OPERATION_EVENT(name)					\
		DEFINE_EVENT(gb_operation, name,			\
				TP_PROTO(struct gb_operation *operation), \
				TP_ARGS(operation))

/*
 * Occurs after a new operation for an outgoing request has been
 * successfully created.
 */
DEFINE_OPERATION_EVENT(gb_operation_create);

/*
 * Occurs after a new core operation has been created.
 */
DEFINE_OPERATION_EVENT(gb_operation_create_core);

/*
 * Occurs after a new operation for an incoming request has been
 * successfully created and initialized.
 */
DEFINE_OPERATION_EVENT(gb_operation_create_incoming);

/*
 * Occurs when the last reference to an operation has been dropped,
 * prior to freeing resources.
 */
DEFINE_OPERATION_EVENT(gb_operation_destroy);

/*
 * Occurs when an operation has been marked active, after updating
 * its active count.
 */
DEFINE_OPERATION_EVENT(gb_operation_get_active);

/*
 * Occurs when an operation is about to be marked inactive, before
 * updating its active count.
 */
DEFINE_OPERATION_EVENT(gb_operation_put_active);

#undef DEFINE_OPERATION_EVENT

DECLARE_EVENT_CLASS(gb_connection,

	TP_PROTO(struct gb_connection *connection),

	TP_ARGS(connection),

	TP_STRUCT__entry(
		__field(int, hd_bus_id)
		__field(u8, bundle_id)
		/* name contains "hd_cport_id/intf_id:cport_id" */
		__dynamic_array(char, name, sizeof(connection->name))
		__field(enum gb_connection_state, state)
		__field(unsigned long, flags)
	),

	TP_fast_assign(
		__entry->hd_bus_id = connection->hd->bus_id;
		__entry->bundle_id = connection->bundle ?
				connection->bundle->id : BUNDLE_ID_NONE;
		memcpy(__get_str(name), connection->name,
					sizeof(connection->name));
		__entry->state = connection->state;
		__entry->flags = connection->flags;
	),

	TP_printk("hd_bus_id=%d bundle_id=0x%02x name=\"%s\" state=%u flags=0x%lx",
		  __entry->hd_bus_id, __entry->bundle_id, __get_str(name),
		  (unsigned int)__entry->state, __entry->flags)
);

#define DEFINE_CONNECTION_EVENT(name)					\
		DEFINE_EVENT(gb_connection, name,			\
				TP_PROTO(struct gb_connection *connection), \
				TP_ARGS(connection))

/*
 * Occurs after a new connection is successfully created.
 */
DEFINE_CONNECTION_EVENT(gb_connection_create);

/*
 * Occurs when the last reference to a connection has been dropped,
 * before its resources are freed.
 */
DEFINE_CONNECTION_EVENT(gb_connection_release);

/*
 * Occurs when a new reference to a connection is added, currently
 * only when a message over the connection is received.
 */
DEFINE_CONNECTION_EVENT(gb_connection_get);

/*
 * Occurs when a reference to a connection is dropped, after a
 * received message is handled, or when the connection is
 * destroyed.
 */
DEFINE_CONNECTION_EVENT(gb_connection_put);

/*
 * Occurs when a request to enable a connection is made, either for
 * transmit only, or for both transmit and receive.
 */
DEFINE_CONNECTION_EVENT(gb_connection_enable);

/*
 * Occurs when a request to disable a connection is made, either for
 * receive only, or for both transmit and receive. Also occurs when
 * a request to forcefully disable a connection is made.
 */
DEFINE_CONNECTION_EVENT(gb_connection_disable);

#undef DEFINE_CONNECTION_EVENT

DECLARE_EVENT_CLASS(gb_bundle,

	TP_PROTO(struct gb_bundle *bundle),

	TP_ARGS(bundle),

	TP_STRUCT__entry(
		__field(u8, intf_id)
		__field(u8, id)
		__field(u8, class)
		__field(size_t, num_cports)
	),

	TP_fast_assign(
		__entry->intf_id = bundle->intf->interface_id;
		__entry->id = bundle->id;
		__entry->class = bundle->class;
		__entry->num_cports = bundle->num_cports;
	),

	TP_printk("intf_id=0x%02x id=%02x class=0x%02x num_cports=%zu",
		  __entry->intf_id, __entry->id, __entry->class,
		  __entry->num_cports)
);

#define DEFINE_BUNDLE_EVENT(name)					\
		DEFINE_EVENT(gb_bundle, name,				\
				TP_PROTO(struct gb_bundle *bundle),	\
				TP_ARGS(bundle))

/*
 * Occurs after a new bundle is successfully created.
 */
DEFINE_BUNDLE_EVENT(gb_bundle_create);

/*
 * Occurs when the last reference to a bundle has been dropped,
 * before its resources are freed.
 */
DEFINE_BUNDLE_EVENT(gb_bundle_release);

/*
 * Occurs when a bundle is added to an interface when the interface
 * is enabled.
 */
DEFINE_BUNDLE_EVENT(gb_bundle_add);

/*
 * Occurs when a registered bundle gets destroyed, normally at the
 * time an interface is disabled.
 */
DEFINE_BUNDLE_EVENT(gb_bundle_destroy);

#undef DEFINE_BUNDLE_EVENT

DECLARE_EVENT_CLASS(gb_interface,

	TP_PROTO(struct gb_interface *intf),

	TP_ARGS(intf),

	TP_STRUCT__entry(
		__field(u8, module_id)
		__field(u8, id)		/* Interface id */
		__field(u8, device_id)
		__field(int, disconnected)	/* bool */
		__field(int, ejected)		/* bool */
		__field(int, active)		/* bool */
		__field(int, enabled)		/* bool */
		__field(int, mode_switch)	/* bool */
	),

	TP_fast_assign(
		__entry->module_id = intf->module->module_id;
		__entry->id = intf->interface_id;
		__entry->device_id = intf->device_id;
		__entry->disconnected = intf->disconnected;
		__entry->ejected = intf->ejected;
		__entry->active = intf->active;
		__entry->enabled = intf->enabled;
		__entry->mode_switch = intf->mode_switch;
	),

	TP_printk("intf_id=%hhu device_id=%hhu module_id=%hhu D=%d J=%d A=%d E=%d M=%d",
		  __entry->id, __entry->device_id, __entry->module_id,
		  __entry->disconnected, __entry->ejected, __entry->active,
		  __entry->enabled, __entry->mode_switch)
);

#define DEFINE_INTERFACE_EVENT(name)					\
		DEFINE_EVENT(gb_interface, name,			\
				TP_PROTO(struct gb_interface *intf),	\
				TP_ARGS(intf))

/*
 * Occurs after a new interface is successfully created.
 */
DEFINE_INTERFACE_EVENT(gb_interface_create);

/*
 * Occurs after the last reference to an interface has been dropped.
 */
DEFINE_INTERFACE_EVENT(gb_interface_release);

/*
 * Occurs after an interface has been registered.
 */
DEFINE_INTERFACE_EVENT(gb_interface_add);

/*
 * Occurs when a registered interface gets deregistered.
 */
DEFINE_INTERFACE_EVENT(gb_interface_del);

/*
 * Occurs when a registered interface has been successfully
 * activated.
 */
DEFINE_INTERFACE_EVENT(gb_interface_activate);

/*
 * Occurs when an activated interface is being deactivated.
 */
DEFINE_INTERFACE_EVENT(gb_interface_deactivate);

/*
 * Occurs when an interface has been successfully enabled.
 */
DEFINE_INTERFACE_EVENT(gb_interface_enable);

/*
 * Occurs when an enabled interface is being disabled.
 */
DEFINE_INTERFACE_EVENT(gb_interface_disable);

#undef DEFINE_INTERFACE_EVENT

DECLARE_EVENT_CLASS(gb_module,

	TP_PROTO(struct gb_module *module),

	TP_ARGS(module),

	TP_STRUCT__entry(
		__field(int, hd_bus_id)
		__field(u8, module_id)
		__field(size_t, num_interfaces)
		__field(int, disconnected)	/* bool */
	),

	TP_fast_assign(
		__entry->hd_bus_id = module->hd->bus_id;
		__entry->module_id = module->module_id;
		__entry->num_interfaces = module->num_interfaces;
		__entry->disconnected = module->disconnected;
	),

	TP_printk("hd_bus_id=%d module_id=%hhu num_interfaces=%zu disconnected=%d",
		  __entry->hd_bus_id, __entry->module_id,
		  __entry->num_interfaces, __entry->disconnected)
);

#define DEFINE_MODULE_EVENT(name)					\
		DEFINE_EVENT(gb_module, name,				\
				TP_PROTO(struct gb_module *module),	\
				TP_ARGS(module))

/*
 * Occurs after a new module is successfully created, before
 * creating any of its interfaces.
 */
DEFINE_MODULE_EVENT(gb_module_create);

/*
 * Occurs after the last reference to a module has been dropped.
 */
DEFINE_MODULE_EVENT(gb_module_release);

/*
 * Occurs after a module is successfully created, before registering
 * any of its interfaces.
 */
DEFINE_MODULE_EVENT(gb_module_add);

/*
 * Occurs when a module is deleted, before deregistering its
 * interfaces.
 */
DEFINE_MODULE_EVENT(gb_module_del);

#undef DEFINE_MODULE_EVENT

DECLARE_EVENT_CLASS(gb_host_device,

	TP_PROTO(struct gb_host_device *hd),

	TP_ARGS(hd),

	TP_STRUCT__entry(
		__field(int, bus_id)
		__field(size_t, num_cports)
		__field(size_t, buffer_size_max)
	),

	TP_fast_assign(
		__entry->bus_id = hd->bus_id;
		__entry->num_cports = hd->num_cports;
		__entry->buffer_size_max = hd->buffer_size_max;
	),

	TP_printk("bus_id=%d num_cports=%zu mtu=%zu",
		  __entry->bus_id, __entry->num_cports,
		  __entry->buffer_size_max)
);

#define DEFINE_HD_EVENT(name)						\
		DEFINE_EVENT(gb_host_device, name,			\
				TP_PROTO(struct gb_host_device *hd),	\
				TP_ARGS(hd))

/*
 * Occurs after a new host device is successfully created, before
 * its SVC has been set up.
 */
DEFINE_HD_EVENT(gb_hd_create);

/*
 * Occurs after the last reference to a host device has been
 * dropped.
 */
DEFINE_HD_EVENT(gb_hd_release);

/*
 * Occurs after a new host device has been added, after the
 * connection to its SVC has been enabled.
 */
DEFINE_HD_EVENT(gb_hd_add);

/*
 * Occurs when a host device is being disconnected from the AP USB
 * host controller.
 */
DEFINE_HD_EVENT(gb_hd_del);

/*
 * Occurs when a host device has passed received data to the Greybus
 * core, after it has been determined it is destined for a valid
 * CPort.
 */
DEFINE_HD_EVENT(gb_hd_in);

#undef DEFINE_HD_EVENT

#endif /* _TRACE_GREYBUS_H */

/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .

/*
 * TRACE_INCLUDE_FILE is not needed if the filename and TRACE_SYSTEM are equal
 */
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE greybus_trace
#include <trace/define_trace.h>
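This header follows the standard kernel trace-event pattern: exactly one translation unit defines CREATE_TRACE_POINTS before including it (core.c above does this), which makes <trace/define_trace.h> emit the event bodies; every other user includes it plainly and only calls the generated trace_*() helpers. A sketch of both sides, for orientation only:

	/* In exactly one .c file (here: core.c), to instantiate events: */
	#define CREATE_TRACE_POINTS
	#include "greybus_trace.h"

	/* In any other core file, a plain include suffices: */
	#include "greybus_trace.h"

	static void example_emit(struct gb_message *message)
	{
		/* emitted just before the host driver's message_send() */
		trace_gb_message_send(message);
	}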
256	drivers/greybus/hd.c (regular file)
@@ -0,0 +1,256 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Greybus Host Device
 *
 * Copyright 2014-2015 Google Inc.
 * Copyright 2014-2015 Linaro Ltd.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/greybus.h>

#include "greybus_trace.h"

EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_create);
EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_release);
EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_add);
EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_del);
EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_in);
EXPORT_TRACEPOINT_SYMBOL_GPL(gb_message_submit);

static struct ida gb_hd_bus_id_map;

int gb_hd_output(struct gb_host_device *hd, void *req, u16 size, u8 cmd,
		 bool async)
{
	if (!hd || !hd->driver || !hd->driver->output)
		return -EINVAL;
	return hd->driver->output(hd, req, size, cmd, async);
}
EXPORT_SYMBOL_GPL(gb_hd_output);

static ssize_t bus_id_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct gb_host_device *hd = to_gb_host_device(dev);

	return sprintf(buf, "%d\n", hd->bus_id);
}
static DEVICE_ATTR_RO(bus_id);

static struct attribute *bus_attrs[] = {
	&dev_attr_bus_id.attr,
	NULL
};
ATTRIBUTE_GROUPS(bus);

int gb_hd_cport_reserve(struct gb_host_device *hd, u16 cport_id)
{
	struct ida *id_map = &hd->cport_id_map;
	int ret;

	ret = ida_simple_get(id_map, cport_id, cport_id + 1, GFP_KERNEL);
	if (ret < 0) {
		dev_err(&hd->dev, "failed to reserve cport %u\n", cport_id);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(gb_hd_cport_reserve);

void gb_hd_cport_release_reserved(struct gb_host_device *hd, u16 cport_id)
{
	struct ida *id_map = &hd->cport_id_map;

	ida_simple_remove(id_map, cport_id);
}
EXPORT_SYMBOL_GPL(gb_hd_cport_release_reserved);

/* Locking: Caller guarantees serialisation */
int gb_hd_cport_allocate(struct gb_host_device *hd, int cport_id,
			 unsigned long flags)
{
	struct ida *id_map = &hd->cport_id_map;
	int ida_start, ida_end;

	if (hd->driver->cport_allocate)
		return hd->driver->cport_allocate(hd, cport_id, flags);

	if (cport_id < 0) {
		ida_start = 0;
		ida_end = hd->num_cports;
	} else if (cport_id < hd->num_cports) {
		ida_start = cport_id;
		ida_end = cport_id + 1;
	} else {
		dev_err(&hd->dev, "cport %d not available\n", cport_id);
		return -EINVAL;
	}

	return ida_simple_get(id_map, ida_start, ida_end, GFP_KERNEL);
}

/* Locking: Caller guarantees serialisation */
void gb_hd_cport_release(struct gb_host_device *hd, u16 cport_id)
{
	if (hd->driver->cport_release) {
		hd->driver->cport_release(hd, cport_id);
		return;
	}

	ida_simple_remove(&hd->cport_id_map, cport_id);
}

static void gb_hd_release(struct device *dev)
{
	struct gb_host_device *hd = to_gb_host_device(dev);

	trace_gb_hd_release(hd);

	if (hd->svc)
		gb_svc_put(hd->svc);
	ida_simple_remove(&gb_hd_bus_id_map, hd->bus_id);
	ida_destroy(&hd->cport_id_map);
	kfree(hd);
}

struct device_type greybus_hd_type = {
	.name		= "greybus_host_device",
	.release	= gb_hd_release,
};

struct gb_host_device *gb_hd_create(struct gb_hd_driver *driver,
				    struct device *parent,
				    size_t buffer_size_max,
				    size_t num_cports)
{
	struct gb_host_device *hd;
	int ret;

	/*
	 * Validate that the driver implements all of the callbacks
	 * so that we don't have to check every time we call them.
	 */
	if ((!driver->message_send) || (!driver->message_cancel)) {
		dev_err(parent, "mandatory hd-callbacks missing\n");
		return ERR_PTR(-EINVAL);
	}

	if (buffer_size_max < GB_OPERATION_MESSAGE_SIZE_MIN) {
		dev_err(parent, "greybus host-device buffers too small\n");
		return ERR_PTR(-EINVAL);
	}

	if (num_cports == 0 || num_cports > CPORT_ID_MAX + 1) {
		dev_err(parent, "Invalid number of CPorts: %zu\n", num_cports);
		return ERR_PTR(-EINVAL);
	}

	/*
	 * Make sure to never allocate messages larger than what the Greybus
	 * protocol supports.
	 */
	if (buffer_size_max > GB_OPERATION_MESSAGE_SIZE_MAX) {
		dev_warn(parent, "limiting buffer size to %u\n",
			 GB_OPERATION_MESSAGE_SIZE_MAX);
		buffer_size_max = GB_OPERATION_MESSAGE_SIZE_MAX;
	}

	hd = kzalloc(sizeof(*hd) + driver->hd_priv_size, GFP_KERNEL);
	if (!hd)
		return ERR_PTR(-ENOMEM);

	ret = ida_simple_get(&gb_hd_bus_id_map, 1, 0, GFP_KERNEL);
	if (ret < 0) {
		kfree(hd);
		return ERR_PTR(ret);
	}
	hd->bus_id = ret;

	hd->driver = driver;
	INIT_LIST_HEAD(&hd->modules);
	INIT_LIST_HEAD(&hd->connections);
	ida_init(&hd->cport_id_map);
	hd->buffer_size_max = buffer_size_max;
	hd->num_cports = num_cports;

	hd->dev.parent = parent;
	hd->dev.bus = &greybus_bus_type;
	hd->dev.type = &greybus_hd_type;
	hd->dev.groups = bus_groups;
	hd->dev.dma_mask = hd->dev.parent->dma_mask;
	device_initialize(&hd->dev);
	dev_set_name(&hd->dev, "greybus%d", hd->bus_id);

	trace_gb_hd_create(hd);

	hd->svc = gb_svc_create(hd);
	if (!hd->svc) {
		dev_err(&hd->dev, "failed to create svc\n");
		put_device(&hd->dev);
		return ERR_PTR(-ENOMEM);
	}

	return hd;
}
EXPORT_SYMBOL_GPL(gb_hd_create);

int gb_hd_add(struct gb_host_device *hd)
{
	int ret;

	ret = device_add(&hd->dev);
	if (ret)
		return ret;

	ret = gb_svc_add(hd->svc);
	if (ret) {
		device_del(&hd->dev);
		return ret;
	}

	trace_gb_hd_add(hd);

	return 0;
}
EXPORT_SYMBOL_GPL(gb_hd_add);

void gb_hd_del(struct gb_host_device *hd)
{
	trace_gb_hd_del(hd);

	/*
	 * Tear down the svc and flush any on-going hotplug processing before
	 * removing the remaining interfaces.
	 */
	gb_svc_del(hd->svc);

	device_del(&hd->dev);
}
EXPORT_SYMBOL_GPL(gb_hd_del);

void gb_hd_shutdown(struct gb_host_device *hd)
{
	gb_svc_del(hd->svc);
}
EXPORT_SYMBOL_GPL(gb_hd_shutdown);

void gb_hd_put(struct gb_host_device *hd)
{
	put_device(&hd->dev);
}
EXPORT_SYMBOL_GPL(gb_hd_put);

int __init gb_hd_init(void)
{
	ida_init(&gb_hd_bus_id_map);

	return 0;
}

void gb_hd_exit(void)
{
	ida_destroy(&gb_hd_bus_id_map);
}
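From a host-controller driver's point of view the lifecycle above is create, add, del, put. A sketch of a hypothetical transport driver's attach path; the example_* names, the buffer size, and the CPort count are assumptions for illustration, and the callback prototypes follow the gb_hd_driver declarations in the greybus host-device header:

	static int example_message_send(struct gb_host_device *hd,
					u16 dest_cport_id,
					struct gb_message *message,
					gfp_t gfp_mask);
	static void example_message_cancel(struct gb_message *message);

	static struct gb_hd_driver example_hd_driver = {
		.hd_priv_size	= 0,			/* no private data */
		.message_send	= example_message_send,	/* mandatory */
		.message_cancel	= example_message_cancel, /* mandatory */
	};

	static int example_attach(struct device *parent)
	{
		struct gb_host_device *hd;
		int ret;

		hd = gb_hd_create(&example_hd_driver, parent, PAGE_SIZE, 16);
		if (IS_ERR(hd))
			return PTR_ERR(hd);

		ret = gb_hd_add(hd);
		if (ret) {
			/* drops the last ref, frees via gb_hd_release() */
			gb_hd_put(hd);
			return ret;
		}

		return 0;
	}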
1263	drivers/greybus/interface.c (regular file)
(file diff too large to display)
533	drivers/greybus/manifest.c (regular file)
@@ -0,0 +1,533 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Greybus manifest parsing
 *
 * Copyright 2014-2015 Google Inc.
 * Copyright 2014-2015 Linaro Ltd.
 */

#include <linux/greybus.h>

static const char *get_descriptor_type_string(u8 type)
{
	switch (type) {
	case GREYBUS_TYPE_INVALID:
		return "invalid";
	case GREYBUS_TYPE_STRING:
		return "string";
	case GREYBUS_TYPE_INTERFACE:
		return "interface";
	case GREYBUS_TYPE_CPORT:
		return "cport";
	case GREYBUS_TYPE_BUNDLE:
		return "bundle";
	default:
		WARN_ON(1);
		return "unknown";
	}
}

/*
 * We scan the manifest once to identify where all the descriptors
 * are. The result is a list of these manifest_desc structures. We
 * then pick through them for what we're looking for (starting with
 * the interface descriptor). As each is processed we remove it from
 * the list. When we're done the list should (probably) be empty.
 */
struct manifest_desc {
	struct list_head		links;

	size_t				size;
	void				*data;
	enum greybus_descriptor_type	type;
};

static void release_manifest_descriptor(struct manifest_desc *descriptor)
{
	list_del(&descriptor->links);
	kfree(descriptor);
}

static void release_manifest_descriptors(struct gb_interface *intf)
{
	struct manifest_desc *descriptor;
	struct manifest_desc *next;

	list_for_each_entry_safe(descriptor, next, &intf->manifest_descs, links)
		release_manifest_descriptor(descriptor);
}

static void release_cport_descriptors(struct list_head *head, u8 bundle_id)
{
	struct manifest_desc *desc, *tmp;
	struct greybus_descriptor_cport *desc_cport;

	list_for_each_entry_safe(desc, tmp, head, links) {
		desc_cport = desc->data;

		if (desc->type != GREYBUS_TYPE_CPORT)
			continue;

		if (desc_cport->bundle == bundle_id)
			release_manifest_descriptor(desc);
	}
}

static struct manifest_desc *get_next_bundle_desc(struct gb_interface *intf)
{
	struct manifest_desc *descriptor;
	struct manifest_desc *next;

	list_for_each_entry_safe(descriptor, next, &intf->manifest_descs, links)
		if (descriptor->type == GREYBUS_TYPE_BUNDLE)
			return descriptor;

	return NULL;
}

/*
 * Validate the given descriptor. Its reported size must fit within
 * the number of bytes remaining, and it must have a recognized
 * type. Check that the reported size is at least as big as what
 * we expect to see. (It could be bigger, perhaps for a new version
 * of the format.)
 *
 * Returns the (non-zero) number of bytes consumed by the descriptor,
 * or a negative errno.
 */
static int identify_descriptor(struct gb_interface *intf,
			       struct greybus_descriptor *desc, size_t size)
{
	struct greybus_descriptor_header *desc_header = &desc->header;
	struct manifest_desc *descriptor;
	size_t desc_size;
	size_t expected_size;

	if (size < sizeof(*desc_header)) {
		dev_err(&intf->dev, "manifest too small (%zu < %zu)\n", size,
			sizeof(*desc_header));
		return -EINVAL;		/* Must at least have header */
	}

	desc_size = le16_to_cpu(desc_header->size);
	if (desc_size > size) {
		dev_err(&intf->dev, "descriptor too big (%zu > %zu)\n",
			desc_size, size);
		return -EINVAL;
	}

	/* Descriptor needs to at least have a header */
	expected_size = sizeof(*desc_header);

	switch (desc_header->type) {
	case GREYBUS_TYPE_STRING:
		expected_size += sizeof(struct greybus_descriptor_string);
		expected_size += desc->string.length;

		/* String descriptors are padded to 4 byte boundaries */
		expected_size = ALIGN(expected_size, 4);
		break;
	case GREYBUS_TYPE_INTERFACE:
		expected_size += sizeof(struct greybus_descriptor_interface);
		break;
	case GREYBUS_TYPE_BUNDLE:
		expected_size += sizeof(struct greybus_descriptor_bundle);
		break;
	case GREYBUS_TYPE_CPORT:
		expected_size += sizeof(struct greybus_descriptor_cport);
		break;
	case GREYBUS_TYPE_INVALID:
	default:
		dev_err(&intf->dev, "invalid descriptor type (%u)\n",
			desc_header->type);
		return -EINVAL;
	}

	if (desc_size < expected_size) {
		dev_err(&intf->dev, "%s descriptor too small (%zu < %zu)\n",
			get_descriptor_type_string(desc_header->type),
			desc_size, expected_size);
		return -EINVAL;
	}

	/* Descriptor bigger than what we expect */
	if (desc_size > expected_size) {
		dev_warn(&intf->dev, "%s descriptor size mismatch (want %zu got %zu)\n",
			 get_descriptor_type_string(desc_header->type),
			 expected_size, desc_size);
	}

	descriptor = kzalloc(sizeof(*descriptor), GFP_KERNEL);
	if (!descriptor)
		return -ENOMEM;

	descriptor->size = desc_size;
	descriptor->data = (char *)desc + sizeof(*desc_header);
	descriptor->type = desc_header->type;
	list_add_tail(&descriptor->links, &intf->manifest_descs);

	/* desc_size is positive and is known to fit in a signed int */

	return desc_size;
}

/*
 * Find the string descriptor having the given id, validate it, and
 * allocate a duplicate copy of it. The duplicate has an extra byte
 * which guarantees the returned string is NUL-terminated.
 *
 * String index 0 is valid (it represents "no string"), and for
 * that a null pointer is returned.
 *
 * Otherwise returns a pointer to a newly-allocated copy of the
 * descriptor string, or an error-coded pointer on failure.
 */
static char *gb_string_get(struct gb_interface *intf, u8 string_id)
{
	struct greybus_descriptor_string *desc_string;
	struct manifest_desc *descriptor;
	bool found = false;
	char *string;

	/* A zero string id means no string (but no error) */
	if (!string_id)
		return NULL;

	list_for_each_entry(descriptor, &intf->manifest_descs, links) {
		if (descriptor->type != GREYBUS_TYPE_STRING)
			continue;

		desc_string = descriptor->data;
		if (desc_string->id == string_id) {
			found = true;
			break;
		}
	}
	if (!found)
		return ERR_PTR(-ENOENT);

	/* Allocate an extra byte so we can guarantee it's NUL-terminated */
	string = kmemdup(&desc_string->string, desc_string->length + 1,
			 GFP_KERNEL);
	if (!string)
		return ERR_PTR(-ENOMEM);
	string[desc_string->length] = '\0';

	/* Ok we've used this string, so we're done with it */
	release_manifest_descriptor(descriptor);

	return string;
}

/*
 * Find cport descriptors in the manifest associated with the given
 * bundle, and set up data structures for the functions that use
 * them. Returns the number of cports set up for the bundle, or 0
 * if there is an error.
 */
static u32 gb_manifest_parse_cports(struct gb_bundle *bundle)
{
	struct gb_interface *intf = bundle->intf;
	struct greybus_descriptor_cport *desc_cport;
	struct manifest_desc *desc, *next, *tmp;
	LIST_HEAD(list);
	u8 bundle_id = bundle->id;
	u16 cport_id;
	u32 count = 0;
	int i;

	/* Set up all cport descriptors associated with this bundle */
	list_for_each_entry_safe(desc, next, &intf->manifest_descs, links) {
		if (desc->type != GREYBUS_TYPE_CPORT)
			continue;

		desc_cport = desc->data;
		if (desc_cport->bundle != bundle_id)
			continue;

		cport_id = le16_to_cpu(desc_cport->id);
		if (cport_id > CPORT_ID_MAX)
			goto exit;

		/* Nothing else should have its cport_id as control cport id */
		if (cport_id == GB_CONTROL_CPORT_ID) {
			dev_err(&bundle->dev, "invalid cport id found (%02u)\n",
				cport_id);
			goto exit;
		}

		/*
		 * Found one, move it to our temporary list after checking for
		 * duplicates.
		 */
		list_for_each_entry(tmp, &list, links) {
			desc_cport = tmp->data;
			if (cport_id == le16_to_cpu(desc_cport->id)) {
				dev_err(&bundle->dev,
					"duplicate CPort %u found\n", cport_id);
				goto exit;
			}
		}
		list_move_tail(&desc->links, &list);
		count++;
	}

	if (!count)
		return 0;

	bundle->cport_desc = kcalloc(count, sizeof(*bundle->cport_desc),
				     GFP_KERNEL);
	if (!bundle->cport_desc)
		goto exit;

	bundle->num_cports = count;

	i = 0;
	list_for_each_entry_safe(desc, next, &list, links) {
		desc_cport = desc->data;
		memcpy(&bundle->cport_desc[i++], desc_cport,
		       sizeof(*desc_cport));

		/* Release the cport descriptor */
		release_manifest_descriptor(desc);
	}

	return count;
exit:
	release_cport_descriptors(&list, bundle_id);
	/*
	 * Free all cports for this bundle to avoid 'excess descriptors'
	 * warnings.
	 */
	release_cport_descriptors(&intf->manifest_descs, bundle_id);

	return 0;	/* Error; count should also be 0 */
}

/*
 * Find bundle descriptors in the manifest and set up their data
 * structures. Returns the number of bundles set up for the
 * given interface.
 */
static u32 gb_manifest_parse_bundles(struct gb_interface *intf)
{
	struct manifest_desc *desc;
	struct gb_bundle *bundle;
	struct gb_bundle *bundle_next;
	u32 count = 0;
	u8 bundle_id;
	u8 class;

	while ((desc = get_next_bundle_desc(intf))) {
		struct greybus_descriptor_bundle *desc_bundle;

		/* Found one. Set up its bundle structure */
		desc_bundle = desc->data;
		bundle_id = desc_bundle->id;
		class = desc_bundle->class;

		/* Done with this bundle descriptor */
		release_manifest_descriptor(desc);

		/* Ignore any legacy control bundles */
		if (bundle_id == GB_CONTROL_BUNDLE_ID) {
			dev_dbg(&intf->dev, "%s - ignoring control bundle\n",
				__func__);
			release_cport_descriptors(&intf->manifest_descs,
						  bundle_id);
			continue;
		}

		/* Nothing else should have its class set to control class */
		if (class == GREYBUS_CLASS_CONTROL) {
			dev_err(&intf->dev,
				"bundle %u cannot use control class\n",
				bundle_id);
			goto cleanup;
		}

		bundle = gb_bundle_create(intf, bundle_id, class);
		if (!bundle)
			goto cleanup;

		/*
		 * Now go set up this bundle's functions and cports.
		 *
		 * A 'bundle' represents a device in greybus. It may require
		 * multiple cports for its functioning. If we fail to setup any
		 * cport of a bundle, we better reject the complete bundle as
		 * the device may not be able to function properly then.
		 *
		 * But, failing to setup a cport of bundle X doesn't mean that
		 * the device corresponding to bundle Y will not work properly.
		 * Bundles should be treated as separate independent devices.
		 *
		 * While parsing manifest for an interface, treat bundles as
		 * separate entities and don't reject entire interface and its
		 * bundles on failing to initialize a cport. But make sure the
		 * bundle which needs the cport, gets destroyed properly.
		 */
		if (!gb_manifest_parse_cports(bundle)) {
			gb_bundle_destroy(bundle);
			continue;
		}

		count++;
	}

	return count;
cleanup:
	/* An error occurred; undo any changes we've made */
	list_for_each_entry_safe(bundle, bundle_next, &intf->bundles, links) {
		gb_bundle_destroy(bundle);
		count--;
	}
	return 0;	/* Error; count should also be 0 */
}

static bool gb_manifest_parse_interface(struct gb_interface *intf,
					struct manifest_desc *interface_desc)
{
	struct greybus_descriptor_interface *desc_intf = interface_desc->data;
	struct gb_control *control = intf->control;
	char *str;

	/* Handle the strings first--they can fail */
	str = gb_string_get(intf, desc_intf->vendor_stringid);
	if (IS_ERR(str))
		return false;
	control->vendor_string = str;

	str = gb_string_get(intf, desc_intf->product_stringid);
	if (IS_ERR(str))
		goto out_free_vendor_string;
	control->product_string = str;

	/* Assign feature flags communicated via manifest */
	intf->features = desc_intf->features;

	/* Release the interface descriptor, now that we're done with it */
	release_manifest_descriptor(interface_desc);

	/* An interface must have at least one bundle descriptor */
	if (!gb_manifest_parse_bundles(intf)) {
		dev_err(&intf->dev, "manifest bundle descriptors not valid\n");
		goto out_err;
	}

	return true;
out_err:
	kfree(control->product_string);
	control->product_string = NULL;
out_free_vendor_string:
	kfree(control->vendor_string);
	control->vendor_string = NULL;

	return false;
}

/*
 * Parse a buffer containing an interface manifest.
 *
 * If we find anything wrong with the content/format of the buffer
 * we reject it.
 *
 * The first requirement is that the manifest's version is
 * one we can parse.
 *
 * We make an initial pass through the buffer and identify all of
 * the descriptors it contains, keeping track of each one's type
 * and the location and size of its data in the buffer.
 *
 * Next we scan the descriptors, looking for an interface descriptor;
 * there must be exactly one of those. When found, we record the
 * information it contains, and then remove that descriptor (and any
 * string descriptors it refers to) from further consideration.
 *
 * After that we look for the interface's bundles--there must be at
 * least one of those.
 *
 * Returns true if parsing was successful, false otherwise.
 */
bool gb_manifest_parse(struct gb_interface *intf, void *data, size_t size)
{
	struct greybus_manifest *manifest;
	struct greybus_manifest_header *header;
	struct greybus_descriptor *desc;
	struct manifest_desc *descriptor;
	struct manifest_desc *interface_desc = NULL;
	u16 manifest_size;
	u32 found = 0;
	bool result;

	/* Manifest descriptor list should be empty here */
	if (WARN_ON(!list_empty(&intf->manifest_descs)))
		return false;

	/* we have to have at _least_ the manifest header */
	if (size < sizeof(*header)) {
		dev_err(&intf->dev, "short manifest (%zu < %zu)\n",
			size, sizeof(*header));
		return false;
	}

	/* Make sure the size is right */
	manifest = data;
	header = &manifest->header;
	manifest_size = le16_to_cpu(header->size);
	if (manifest_size != size) {
		dev_err(&intf->dev, "manifest size mismatch (%zu != %u)\n",
			size, manifest_size);
		return false;
	}

	/* Validate major/minor number */
	if (header->version_major > GREYBUS_VERSION_MAJOR) {
		dev_err(&intf->dev, "manifest version too new (%u.%u > %u.%u)\n",
			header->version_major, header->version_minor,
			GREYBUS_VERSION_MAJOR, GREYBUS_VERSION_MINOR);
		return false;
	}

	/* OK, find all the descriptors */
	desc = manifest->descriptors;
	size -= sizeof(*header);
	while (size) {
		int desc_size;

		desc_size = identify_descriptor(intf, desc, size);
		if (desc_size < 0) {
			result = false;
			goto out;
		}
		desc = (struct greybus_descriptor *)((char *)desc + desc_size);
		size -= desc_size;
	}

	/* There must be a single interface descriptor */
	list_for_each_entry(descriptor, &intf->manifest_descs, links) {
		if (descriptor->type == GREYBUS_TYPE_INTERFACE)
			if (!found++)
				interface_desc = descriptor;
	}
	if (found != 1) {
		dev_err(&intf->dev, "manifest must have 1 interface descriptor (%u found)\n",
			found);
		result = false;
		goto out;
	}

	/* Parse the manifest, starting with the interface descriptor */
	result = gb_manifest_parse_interface(intf, interface_desc);

	/*
	 * We really should have no remaining descriptors, but we
	 * don't know what newer format manifests might leave.
	 */
	if (result && !list_empty(&intf->manifest_descs))
		dev_info(&intf->dev, "excess descriptors in interface manifest\n");
out:
	release_manifest_descriptors(intf);

	return result;
}
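The descriptor walk in gb_manifest_parse() is entirely size-driven: each descriptor begins with a little-endian size in its header, and the parser advances by exactly that many bytes until the buffer is exhausted. A standalone sketch of the same walk, assuming only the header layouts from the greybus manifest header; example_count_descriptors() is a hypothetical helper, not part of this file:

	static int example_count_descriptors(const void *data, size_t size)
	{
		const u8 *p = data;
		const struct greybus_descriptor_header *h;
		size_t desc_size;
		int count = 0;

		if (size < sizeof(struct greybus_manifest_header))
			return -EINVAL;

		p += sizeof(struct greybus_manifest_header);
		size -= sizeof(struct greybus_manifest_header);

		while (size >= sizeof(*h)) {
			h = (const struct greybus_descriptor_header *)p;
			desc_size = le16_to_cpu(h->size);
			/* mirrors the checks in identify_descriptor() */
			if (desc_size < sizeof(*h) || desc_size > size)
				return -EINVAL;
			p += desc_size;
			size -= desc_size;
			count++;
		}

		return size ? -EINVAL : count;
	}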
236	drivers/greybus/module.c (regular file)
@@ -0,0 +1,236 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Greybus Module code
 *
 * Copyright 2016 Google Inc.
 * Copyright 2016 Linaro Ltd.
 */

#include <linux/greybus.h>
#include "greybus_trace.h"

static ssize_t eject_store(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t len)
{
	struct gb_module *module = to_gb_module(dev);
	struct gb_interface *intf;
	size_t i;
	long val;
	int ret;

	ret = kstrtol(buf, 0, &val);
	if (ret)
		return ret;

	if (!val)
		return len;

	for (i = 0; i < module->num_interfaces; ++i) {
		intf = module->interfaces[i];

		mutex_lock(&intf->mutex);
		/* Set flag to prevent concurrent activation. */
		intf->ejected = true;
		gb_interface_disable(intf);
		gb_interface_deactivate(intf);
		mutex_unlock(&intf->mutex);
	}

	/* Tell the SVC to eject the primary interface. */
	ret = gb_svc_intf_eject(module->hd->svc, module->module_id);
	if (ret)
		return ret;

	return len;
}
static DEVICE_ATTR_WO(eject);

static ssize_t module_id_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct gb_module *module = to_gb_module(dev);

	return sprintf(buf, "%u\n", module->module_id);
}
static DEVICE_ATTR_RO(module_id);

static ssize_t num_interfaces_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct gb_module *module = to_gb_module(dev);

	return sprintf(buf, "%zu\n", module->num_interfaces);
}
static DEVICE_ATTR_RO(num_interfaces);

static struct attribute *module_attrs[] = {
	&dev_attr_eject.attr,
	&dev_attr_module_id.attr,
	&dev_attr_num_interfaces.attr,
	NULL,
};
ATTRIBUTE_GROUPS(module);

static void gb_module_release(struct device *dev)
{
	struct gb_module *module = to_gb_module(dev);

	trace_gb_module_release(module);

	kfree(module);
}

struct device_type greybus_module_type = {
	.name		= "greybus_module",
	.release	= gb_module_release,
};

struct gb_module *gb_module_create(struct gb_host_device *hd, u8 module_id,
				   size_t num_interfaces)
{
	struct gb_interface *intf;
	struct gb_module *module;
	int i;

	module = kzalloc(struct_size(module, interfaces, num_interfaces),
			 GFP_KERNEL);
	if (!module)
		return NULL;

	module->hd = hd;
	module->module_id = module_id;
	module->num_interfaces = num_interfaces;

	module->dev.parent = &hd->dev;
	module->dev.bus = &greybus_bus_type;
	module->dev.type = &greybus_module_type;
	module->dev.groups = module_groups;
	module->dev.dma_mask = hd->dev.dma_mask;
	device_initialize(&module->dev);
	dev_set_name(&module->dev, "%d-%u", hd->bus_id, module_id);

	trace_gb_module_create(module);

	for (i = 0; i < num_interfaces; ++i) {
		intf = gb_interface_create(module, module_id + i);
		if (!intf) {
			dev_err(&module->dev, "failed to create interface %u\n",
				module_id + i);
			goto err_put_interfaces;
		}
		module->interfaces[i] = intf;
	}

	return module;

err_put_interfaces:
	for (--i; i >= 0; --i)
		gb_interface_put(module->interfaces[i]);

	put_device(&module->dev);

	return NULL;
}

/*
 * Register and enable an interface after first attempting to activate it.
 */
static void gb_module_register_interface(struct gb_interface *intf)
{
	struct gb_module *module = intf->module;
	u8 intf_id = intf->interface_id;
	int ret;

	mutex_lock(&intf->mutex);

	ret = gb_interface_activate(intf);
	if (ret) {
		if (intf->type != GB_INTERFACE_TYPE_DUMMY) {
			dev_err(&module->dev,
				"failed to activate interface %u: %d\n",
				intf_id, ret);
		}

		gb_interface_add(intf);
		goto err_unlock;
	}

	ret = gb_interface_add(intf);
	if (ret)
		goto err_interface_deactivate;

	ret = gb_interface_enable(intf);
	if (ret) {
		dev_err(&module->dev, "failed to enable interface %u: %d\n",
			intf_id, ret);
		goto err_interface_deactivate;
	}

	mutex_unlock(&intf->mutex);

	return;

err_interface_deactivate:
	gb_interface_deactivate(intf);
err_unlock:
	mutex_unlock(&intf->mutex);
}

static void gb_module_deregister_interface(struct gb_interface *intf)
{
	/* Mark as disconnected to prevent I/O during disable. */
	if (intf->module->disconnected)
		intf->disconnected = true;

	mutex_lock(&intf->mutex);
	intf->removed = true;
	gb_interface_disable(intf);
	gb_interface_deactivate(intf);
	mutex_unlock(&intf->mutex);

	gb_interface_del(intf);
}

/* Register a module and its interfaces. */
int gb_module_add(struct gb_module *module)
{
	size_t i;
	int ret;

	ret = device_add(&module->dev);
	if (ret) {
		dev_err(&module->dev, "failed to register module: %d\n", ret);
		return ret;
	}

	trace_gb_module_add(module);

	for (i = 0; i < module->num_interfaces; ++i)
		gb_module_register_interface(module->interfaces[i]);

	return 0;
}

/* Deregister a module and its interfaces. */
void gb_module_del(struct gb_module *module)
{
	size_t i;

	for (i = 0; i < module->num_interfaces; ++i)
		gb_module_deregister_interface(module->interfaces[i]);

	trace_gb_module_del(module);

	device_del(&module->dev);
}

void gb_module_put(struct gb_module *module)
{
	size_t i;

	for (i = 0; i < module->num_interfaces; ++i)
		gb_interface_put(module->interfaces[i]);

	put_device(&module->dev);
}
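gb_module_create() relies on struct gb_module ending in a flexible array of interface pointers, so a single overflow-checked allocation covers both the module and its interface table. The pattern in isolation, using a hypothetical mirror struct for illustration:

	#include <linux/overflow.h>
	#include <linux/slab.h>

	struct example_module {
		size_t num_interfaces;
		struct gb_interface *interfaces[];  /* flexible array member */
	};

	static struct example_module *example_module_alloc(size_t n)
	{
		struct example_module *m;

		/* struct_size() == sizeof(*m) + n * sizeof(m->interfaces[0]),
		 * saturating instead of wrapping on overflow */
		m = kzalloc(struct_size(m, interfaces, n), GFP_KERNEL);
		if (m)
			m->num_interfaces = n;

		return m;
	}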
1264	drivers/greybus/operation.c (regular file)
(file diff too large to display)
1397	drivers/greybus/svc.c (regular file)
(file diff too large to display)
197
drivers/greybus/svc_watchdog.c
普通文件
197
drivers/greybus/svc_watchdog.c
普通文件
@@ -0,0 +1,197 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * SVC Greybus "watchdog" driver.
 *
 * Copyright 2016 Google Inc.
 */

#include <linux/delay.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/greybus.h>

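/* How often to ping the SVC. */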
#define SVC_WATCHDOG_PERIOD	(2 * HZ)

struct gb_svc_watchdog {
	struct delayed_work	work;
	struct gb_svc		*svc;
	bool			enabled;
	struct notifier_block	pm_notifier;
};

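/*
 * Separate work item used to reset the network: the watchdog's own work
 * cannot do it directly, as the reset tears down the SVC device itself.
 */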
static struct delayed_work reset_work;

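/* Stop pinging across system suspend, and resume pinging afterwards. */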
static int svc_watchdog_pm_notifier(struct notifier_block *notifier,
				    unsigned long pm_event, void *unused)
{
	struct gb_svc_watchdog *watchdog =
		container_of(notifier, struct gb_svc_watchdog, pm_notifier);

	switch (pm_event) {
	case PM_SUSPEND_PREPARE:
		gb_svc_watchdog_disable(watchdog->svc);
		break;
	case PM_POST_SUSPEND:
		gb_svc_watchdog_enable(watchdog->svc);
		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}

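/*
 * Ask user space (here, an Android "start unipro_reset" service) to
 * restart the greybus network.
 */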
static void greybus_reset(struct work_struct *work)
{
	static char const start_path[] = "/system/bin/start";
	static char *envp[] = {
		"HOME=/",
		"PATH=/sbin:/vendor/bin:/system/sbin:/system/bin:/system/xbin",
		NULL,
	};
	static char *argv[] = {
		(char *)start_path,
		"unipro_reset",
		NULL,
	};

	pr_err("svc_watchdog: calling \"%s %s\" to reset greybus network!\n",
	       argv[0], argv[1]);
	call_usermodehelper(start_path, argv, envp, UMH_WAIT_EXEC);
}

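/*
 * Ping the SVC once per period; on failure, either panic or schedule a
 * network reset, depending on the configured watchdog bite action.
 */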
static void do_work(struct work_struct *work)
{
	struct gb_svc_watchdog *watchdog;
	struct gb_svc *svc;
	int retval;

	watchdog = container_of(work, struct gb_svc_watchdog, work.work);
	svc = watchdog->svc;

	dev_dbg(&svc->dev, "%s: ping.\n", __func__);
	retval = gb_svc_ping(svc);
	if (retval) {
		/*
		 * Something went really wrong, let's warn userspace and then
		 * pull the plug and reset the whole greybus network.
		 * We need to do this outside of this workqueue as we will be
		 * tearing down the svc device itself. So queue up
		 * yet-another-callback to do that.
		 */
		dev_err(&svc->dev,
			"SVC ping has returned %d, something is wrong!!!\n",
			retval);

		if (svc->action == GB_SVC_WATCHDOG_BITE_PANIC_KERNEL) {
			panic("SVC is not responding\n");
		} else if (svc->action == GB_SVC_WATCHDOG_BITE_RESET_UNIPRO) {
			dev_err(&svc->dev, "Resetting the greybus network, watch out!!!\n");

			INIT_DELAYED_WORK(&reset_work, greybus_reset);
			schedule_delayed_work(&reset_work, HZ / 2);

			/*
			 * Disable ourselves, we don't want to trip again unless
			 * userspace wants us to.
			 */
			watchdog->enabled = false;
		}
	}

	/* resubmit our work to happen again, if we are still "alive" */
	if (watchdog->enabled)
		schedule_delayed_work(&watchdog->work, SVC_WATCHDOG_PERIOD);
}

int gb_svc_watchdog_create(struct gb_svc *svc)
{
	struct gb_svc_watchdog *watchdog;
	int retval;

	if (svc->watchdog)
		return 0;

	watchdog = kmalloc(sizeof(*watchdog), GFP_KERNEL);
	if (!watchdog)
		return -ENOMEM;

	watchdog->enabled = false;
	watchdog->svc = svc;
	INIT_DELAYED_WORK(&watchdog->work, do_work);
	svc->watchdog = watchdog;

	watchdog->pm_notifier.notifier_call = svc_watchdog_pm_notifier;
	retval = register_pm_notifier(&watchdog->pm_notifier);
	if (retval) {
		dev_err(&svc->dev, "error registering pm notifier(%d)\n",
			retval);
		goto svc_watchdog_create_err;
	}

	retval = gb_svc_watchdog_enable(svc);
	if (retval) {
		dev_err(&svc->dev, "error enabling watchdog (%d)\n", retval);
		unregister_pm_notifier(&watchdog->pm_notifier);
		goto svc_watchdog_create_err;
	}
	return retval;

svc_watchdog_create_err:
	svc->watchdog = NULL;
	kfree(watchdog);

	return retval;
}

void gb_svc_watchdog_destroy(struct gb_svc *svc)
{
	struct gb_svc_watchdog *watchdog = svc->watchdog;

	if (!watchdog)
		return;

	unregister_pm_notifier(&watchdog->pm_notifier);
	gb_svc_watchdog_disable(svc);
	svc->watchdog = NULL;
	kfree(watchdog);
}

bool gb_svc_watchdog_enabled(struct gb_svc *svc)
{
	if (!svc || !svc->watchdog)
		return false;
	return svc->watchdog->enabled;
}

int gb_svc_watchdog_enable(struct gb_svc *svc)
{
	struct gb_svc_watchdog *watchdog;

	if (!svc->watchdog)
		return -ENODEV;

	watchdog = svc->watchdog;
	if (watchdog->enabled)
		return 0;

	watchdog->enabled = true;
	schedule_delayed_work(&watchdog->work, SVC_WATCHDOG_PERIOD);
	return 0;
}

int gb_svc_watchdog_disable(struct gb_svc *svc)
{
	struct gb_svc_watchdog *watchdog;

	if (!svc->watchdog)
		return -ENODEV;

	watchdog = svc->watchdog;
	if (!watchdog->enabled)
		return 0;

	watchdog->enabled = false;
	cancel_delayed_work_sync(&watchdog->work);
	return 0;
}
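
Since the svc.c diff is suppressed above, here is a minimal sketch of the lifecycle this watchdog API expects from the SVC core; gb_svc_example_setup() and gb_svc_example_teardown() are hypothetical names for illustration only, not the actual svc.c call sites:

/* Hypothetical call sites; the real ones live in the (suppressed) svc.c. */
static int gb_svc_example_setup(struct gb_svc *svc)
{
	/* Allocates the watchdog, registers its PM notifier, starts pinging. */
	return gb_svc_watchdog_create(svc);
}

static void gb_svc_example_teardown(struct gb_svc *svc)
{
	/* Pinging can be paused independently of destruction... */
	if (gb_svc_watchdog_enabled(svc))
		gb_svc_watchdog_disable(svc);

	/* ...and destroy unregisters the notifier and frees the state. */
	gb_svc_watchdog_destroy(svc);
}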