staging: greybus: move core include files to include/linux/greybus/

With the goal of moving the core of the greybus code out of staging, the
include files need to be moved to include/linux/greybus.h and
include/linux/greybus/.

Cc: Vaibhav Hiremath <hvaibhav.linux@gmail.com>
Cc: Johan Hovold <johan@kernel.org>
Cc: Vaibhav Agarwal <vaibhav.sr@gmail.com>
Cc: Rui Miguel Silva <rmfrfs@gmail.com>
Cc: David Lin <dtwlin@gmail.com>
Cc: "Bryan O'Donoghue" <pure.logic@nexus-software.ie>
Cc: greybus-dev@lists.linaro.org
Cc: devel@driverdev.osuosl.org
Acked-by: Mark Greer <mgreer@animalcreek.com>
Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
Acked-by: Alex Elder <elder@kernel.org>
Link: https://lore.kernel.org/r/20190825055429.18547-8-gregkh@linuxfoundation.org
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
commit ec0ad86817 (parent 9c31973907)
Author: Greg Kroah-Hartman
Date: 2019-08-25 07:54:27 +02:00

53 changed files with 52 additions and 71 deletions

include/linux/greybus/bundle.h Normal file

@@ -0,0 +1,89 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Greybus bundles
*
* Copyright 2014 Google Inc.
* Copyright 2014 Linaro Ltd.
*/
#ifndef __BUNDLE_H
#define __BUNDLE_H
#include <linux/list.h>
#define BUNDLE_ID_NONE U8_MAX
/* Greybus "public" definitions" */
struct gb_bundle {
struct device dev;
struct gb_interface *intf;
u8 id;
u8 class;
u8 class_major;
u8 class_minor;
size_t num_cports;
struct greybus_descriptor_cport *cport_desc;
struct list_head connections;
u8 *state;
struct list_head links; /* interface->bundles */
};
#define to_gb_bundle(d) container_of(d, struct gb_bundle, dev)
/* Greybus "private" definitions" */
struct gb_bundle *gb_bundle_create(struct gb_interface *intf, u8 bundle_id,
u8 class);
int gb_bundle_add(struct gb_bundle *bundle);
void gb_bundle_destroy(struct gb_bundle *bundle);
/* Bundle Runtime PM wrappers */
#ifdef CONFIG_PM
static inline int gb_pm_runtime_get_sync(struct gb_bundle *bundle)
{
int retval;
retval = pm_runtime_get_sync(&bundle->dev);
if (retval < 0) {
dev_err(&bundle->dev,
"pm_runtime_get_sync failed: %d\n", retval);
pm_runtime_put_noidle(&bundle->dev);
return retval;
}
return 0;
}
static inline int gb_pm_runtime_put_autosuspend(struct gb_bundle *bundle)
{
int retval;
pm_runtime_mark_last_busy(&bundle->dev);
retval = pm_runtime_put_autosuspend(&bundle->dev);
return retval;
}
static inline void gb_pm_runtime_get_noresume(struct gb_bundle *bundle)
{
pm_runtime_get_noresume(&bundle->dev);
}
static inline void gb_pm_runtime_put_noidle(struct gb_bundle *bundle)
{
pm_runtime_put_noidle(&bundle->dev);
}
#else
static inline int gb_pm_runtime_get_sync(struct gb_bundle *bundle)
{ return 0; }
static inline int gb_pm_runtime_put_autosuspend(struct gb_bundle *bundle)
{ return 0; }
static inline void gb_pm_runtime_get_noresume(struct gb_bundle *bundle) {}
static inline void gb_pm_runtime_put_noidle(struct gb_bundle *bundle) {}
#endif
#endif /* __BUNDLE_H */
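
For illustration only (not part of this commit): a minimal sketch of how a
bundle driver might bracket I/O with the runtime PM wrappers above. The
gb_example_* names are hypothetical.

static int gb_example_transfer(struct gb_bundle *bundle)
{
	int ret;

	/* Resume the bundle; compiles to a 0-returning no-op when !CONFIG_PM. */
	ret = gb_pm_runtime_get_sync(bundle);
	if (ret)
		return ret;

	ret = gb_example_do_io(bundle);	/* hypothetical I/O helper */

	/* Mark the device busy and drop the reference, allowing autosuspend. */
	gb_pm_runtime_put_autosuspend(bundle);

	return ret;
}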

include/linux/greybus/connection.h Normal file

@@ -0,0 +1,128 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Greybus connections
*
* Copyright 2014 Google Inc.
* Copyright 2014 Linaro Ltd.
*/
#ifndef __CONNECTION_H
#define __CONNECTION_H
#include <linux/list.h>
#include <linux/kfifo.h>
#define GB_CONNECTION_FLAG_CSD BIT(0)
#define GB_CONNECTION_FLAG_NO_FLOWCTRL BIT(1)
#define GB_CONNECTION_FLAG_OFFLOADED BIT(2)
#define GB_CONNECTION_FLAG_CDSI1 BIT(3)
#define GB_CONNECTION_FLAG_CONTROL BIT(4)
#define GB_CONNECTION_FLAG_HIGH_PRIO BIT(5)
#define GB_CONNECTION_FLAG_CORE_MASK GB_CONNECTION_FLAG_CONTROL
enum gb_connection_state {
GB_CONNECTION_STATE_DISABLED = 0,
GB_CONNECTION_STATE_ENABLED_TX = 1,
GB_CONNECTION_STATE_ENABLED = 2,
GB_CONNECTION_STATE_DISCONNECTING = 3,
};
struct gb_operation;
typedef int (*gb_request_handler_t)(struct gb_operation *);
struct gb_connection {
struct gb_host_device *hd;
struct gb_interface *intf;
struct gb_bundle *bundle;
struct kref kref;
u16 hd_cport_id;
u16 intf_cport_id;
struct list_head hd_links;
struct list_head bundle_links;
gb_request_handler_t handler;
unsigned long flags;
struct mutex mutex;
spinlock_t lock;
enum gb_connection_state state;
struct list_head operations;
char name[16];
struct workqueue_struct *wq;
atomic_t op_cycle;
void *private;
bool mode_switch;
};
struct gb_connection *gb_connection_create_static(struct gb_host_device *hd,
u16 hd_cport_id, gb_request_handler_t handler);
struct gb_connection *gb_connection_create_control(struct gb_interface *intf);
struct gb_connection *gb_connection_create(struct gb_bundle *bundle,
u16 cport_id, gb_request_handler_t handler);
struct gb_connection *gb_connection_create_flags(struct gb_bundle *bundle,
u16 cport_id, gb_request_handler_t handler,
unsigned long flags);
struct gb_connection *gb_connection_create_offloaded(struct gb_bundle *bundle,
u16 cport_id, unsigned long flags);
void gb_connection_destroy(struct gb_connection *connection);
static inline bool gb_connection_is_static(struct gb_connection *connection)
{
return !connection->intf;
}
int gb_connection_enable(struct gb_connection *connection);
int gb_connection_enable_tx(struct gb_connection *connection);
void gb_connection_disable_rx(struct gb_connection *connection);
void gb_connection_disable(struct gb_connection *connection);
void gb_connection_disable_forced(struct gb_connection *connection);
void gb_connection_mode_switch_prepare(struct gb_connection *connection);
void gb_connection_mode_switch_complete(struct gb_connection *connection);
void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id,
u8 *data, size_t length);
void gb_connection_latency_tag_enable(struct gb_connection *connection);
void gb_connection_latency_tag_disable(struct gb_connection *connection);
static inline bool gb_connection_e2efc_enabled(struct gb_connection *connection)
{
return !(connection->flags & GB_CONNECTION_FLAG_CSD);
}
static inline bool
gb_connection_flow_control_disabled(struct gb_connection *connection)
{
return connection->flags & GB_CONNECTION_FLAG_NO_FLOWCTRL;
}
static inline bool gb_connection_is_offloaded(struct gb_connection *connection)
{
return connection->flags & GB_CONNECTION_FLAG_OFFLOADED;
}
static inline bool gb_connection_is_control(struct gb_connection *connection)
{
return connection->flags & GB_CONNECTION_FLAG_CONTROL;
}
static inline void *gb_connection_get_data(struct gb_connection *connection)
{
return connection->private;
}
static inline void gb_connection_set_data(struct gb_connection *connection,
void *data)
{
connection->private = data;
}
#endif /* __CONNECTION_H */
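
For illustration only (not part of this commit): a sketch of a bundle driver
creating and enabling a connection with an incoming-request handler. Names
are hypothetical; gb_connection_create() returns an ERR_PTR on failure.

static int gb_example_request_handler(struct gb_operation *op)
{
	/* Dispatch on op->type; return 0 or a negative errno. */
	return 0;
}

static int gb_example_connection_init(struct gb_bundle *bundle, u16 cport_id)
{
	struct gb_connection *connection;
	int ret;

	connection = gb_connection_create(bundle, cport_id,
					  gb_example_request_handler);
	if (IS_ERR(connection))
		return PTR_ERR(connection);

	ret = gb_connection_enable(connection);	/* enable both TX and RX */
	if (ret)
		gb_connection_destroy(connection);

	return ret;
}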

include/linux/greybus/control.h Normal file

@@ -0,0 +1,57 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Greybus CPort control protocol
*
* Copyright 2015 Google Inc.
* Copyright 2015 Linaro Ltd.
*/
#ifndef __CONTROL_H
#define __CONTROL_H
struct gb_control {
struct device dev;
struct gb_interface *intf;
struct gb_connection *connection;
u8 protocol_major;
u8 protocol_minor;
bool has_bundle_activate;
bool has_bundle_version;
char *vendor_string;
char *product_string;
};
#define to_gb_control(d) container_of(d, struct gb_control, dev)
struct gb_control *gb_control_create(struct gb_interface *intf);
int gb_control_enable(struct gb_control *control);
void gb_control_disable(struct gb_control *control);
int gb_control_suspend(struct gb_control *control);
int gb_control_resume(struct gb_control *control);
int gb_control_add(struct gb_control *control);
void gb_control_del(struct gb_control *control);
struct gb_control *gb_control_get(struct gb_control *control);
void gb_control_put(struct gb_control *control);
int gb_control_get_bundle_versions(struct gb_control *control);
int gb_control_connected_operation(struct gb_control *control, u16 cport_id);
int gb_control_disconnected_operation(struct gb_control *control, u16 cport_id);
int gb_control_disconnecting_operation(struct gb_control *control,
u16 cport_id);
int gb_control_mode_switch_operation(struct gb_control *control);
void gb_control_mode_switch_prepare(struct gb_control *control);
void gb_control_mode_switch_complete(struct gb_control *control);
int gb_control_get_manifest_size_operation(struct gb_interface *intf);
int gb_control_get_manifest_operation(struct gb_interface *intf, void *manifest,
size_t size);
int gb_control_bundle_suspend(struct gb_control *control, u8 bundle_id);
int gb_control_bundle_resume(struct gb_control *control, u8 bundle_id);
int gb_control_bundle_deactivate(struct gb_control *control, u8 bundle_id);
int gb_control_bundle_activate(struct gb_control *control, u8 bundle_id);
int gb_control_interface_suspend_prepare(struct gb_control *control);
int gb_control_interface_deactivate_prepare(struct gb_control *control);
int gb_control_interface_hibernate_abort(struct gb_control *control);
#endif /* __CONTROL_H */
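
For illustration only: a sketch of gating a bundle-activate request on the
has_bundle_activate flag, which the core derives from the control protocol
version. The helper name is hypothetical.

static int gb_example_activate(struct gb_control *control, u8 bundle_id)
{
	/* Older interfaces predate bundle_activate; treat it as optional. */
	if (!control->has_bundle_activate)
		return 0;

	return gb_control_bundle_activate(control, bundle_id);
}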

include/linux/greybus/greybus_id.h Normal file

@@ -0,0 +1,27 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* FIXME
* move this to include/linux/mod_devicetable.h when merging
*/
#ifndef __LINUX_GREYBUS_ID_H
#define __LINUX_GREYBUS_ID_H
#include <linux/types.h>
#include <linux/mod_devicetable.h>
struct greybus_bundle_id {
__u16 match_flags;
__u32 vendor;
__u32 product;
__u8 class;
kernel_ulong_t driver_info __aligned(sizeof(kernel_ulong_t));
};
/* Used to match the greybus_bundle_id */
#define GREYBUS_ID_MATCH_VENDOR BIT(0)
#define GREYBUS_ID_MATCH_PRODUCT BIT(1)
#define GREYBUS_ID_MATCH_CLASS BIT(2)
#endif /* __LINUX_GREYBUS_ID_H */
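
For illustration only: a class-match table for a bundle driver, using
GREYBUS_CLASS_HID from greybus_manifest.h below. (The main greybus.h header,
not shown in this excerpt, also carries convenience macros for building such
entries.)

static const struct greybus_bundle_id gb_example_id_table[] = {
	{
		.match_flags	= GREYBUS_ID_MATCH_CLASS,
		.class		= GREYBUS_CLASS_HID,
	},
	{ }	/* terminating entry */
};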

include/linux/greybus/greybus_manifest.h Normal file

@@ -0,0 +1,178 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Greybus manifest definition
*
* See "Greybus Application Protocol" document (version 0.1) for
* details on these values and structures.
*
* Copyright 2014-2015 Google Inc.
* Copyright 2014-2015 Linaro Ltd.
*
* Released under the GPLv2 and BSD licenses.
*/
#ifndef __GREYBUS_MANIFEST_H
#define __GREYBUS_MANIFEST_H
enum greybus_descriptor_type {
GREYBUS_TYPE_INVALID = 0x00,
GREYBUS_TYPE_INTERFACE = 0x01,
GREYBUS_TYPE_STRING = 0x02,
GREYBUS_TYPE_BUNDLE = 0x03,
GREYBUS_TYPE_CPORT = 0x04,
};
enum greybus_protocol {
GREYBUS_PROTOCOL_CONTROL = 0x00,
/* 0x01 is unused */
GREYBUS_PROTOCOL_GPIO = 0x02,
GREYBUS_PROTOCOL_I2C = 0x03,
GREYBUS_PROTOCOL_UART = 0x04,
GREYBUS_PROTOCOL_HID = 0x05,
GREYBUS_PROTOCOL_USB = 0x06,
GREYBUS_PROTOCOL_SDIO = 0x07,
GREYBUS_PROTOCOL_POWER_SUPPLY = 0x08,
GREYBUS_PROTOCOL_PWM = 0x09,
/* 0x0a is unused */
GREYBUS_PROTOCOL_SPI = 0x0b,
GREYBUS_PROTOCOL_DISPLAY = 0x0c,
GREYBUS_PROTOCOL_CAMERA_MGMT = 0x0d,
GREYBUS_PROTOCOL_SENSOR = 0x0e,
GREYBUS_PROTOCOL_LIGHTS = 0x0f,
GREYBUS_PROTOCOL_VIBRATOR = 0x10,
GREYBUS_PROTOCOL_LOOPBACK = 0x11,
GREYBUS_PROTOCOL_AUDIO_MGMT = 0x12,
GREYBUS_PROTOCOL_AUDIO_DATA = 0x13,
GREYBUS_PROTOCOL_SVC = 0x14,
GREYBUS_PROTOCOL_BOOTROM = 0x15,
GREYBUS_PROTOCOL_CAMERA_DATA = 0x16,
GREYBUS_PROTOCOL_FW_DOWNLOAD = 0x17,
GREYBUS_PROTOCOL_FW_MANAGEMENT = 0x18,
GREYBUS_PROTOCOL_AUTHENTICATION = 0x19,
GREYBUS_PROTOCOL_LOG = 0x1a,
/* ... */
GREYBUS_PROTOCOL_RAW = 0xfe,
GREYBUS_PROTOCOL_VENDOR = 0xff,
};
enum greybus_class_type {
GREYBUS_CLASS_CONTROL = 0x00,
/* 0x01 is unused */
/* 0x02 is unused */
/* 0x03 is unused */
/* 0x04 is unused */
GREYBUS_CLASS_HID = 0x05,
/* 0x06 is unused */
/* 0x07 is unused */
GREYBUS_CLASS_POWER_SUPPLY = 0x08,
/* 0x09 is unused */
GREYBUS_CLASS_BRIDGED_PHY = 0x0a,
/* 0x0b is unused */
GREYBUS_CLASS_DISPLAY = 0x0c,
GREYBUS_CLASS_CAMERA = 0x0d,
GREYBUS_CLASS_SENSOR = 0x0e,
GREYBUS_CLASS_LIGHTS = 0x0f,
GREYBUS_CLASS_VIBRATOR = 0x10,
GREYBUS_CLASS_LOOPBACK = 0x11,
GREYBUS_CLASS_AUDIO = 0x12,
/* 0x13 is unused */
/* 0x14 is unused */
GREYBUS_CLASS_BOOTROM = 0x15,
GREYBUS_CLASS_FW_MANAGEMENT = 0x16,
GREYBUS_CLASS_LOG = 0x17,
/* ... */
GREYBUS_CLASS_RAW = 0xfe,
GREYBUS_CLASS_VENDOR = 0xff,
};
enum {
GREYBUS_INTERFACE_FEATURE_TIMESYNC = BIT(0),
};
/*
* The string in a string descriptor is not NUL-terminated. The
* size of the descriptor will be rounded up to a multiple of 4
* bytes, by padding the string with 0x00 bytes if necessary.
*/
struct greybus_descriptor_string {
__u8 length;
__u8 id;
__u8 string[0];
} __packed;
/*
* An interface descriptor describes information about an interface as a whole,
* *not* the functions within it.
*/
struct greybus_descriptor_interface {
__u8 vendor_stringid;
__u8 product_stringid;
__u8 features;
__u8 pad;
} __packed;
/*
 * A bundle descriptor defines an identification number and a class for
 * each bundle.
 *
 * @id: Uniquely identifies a bundle within an interface; its sole purpose is
 * to allow CPort descriptors to specify which bundle they are associated
 * with. The first bundle will have id 0, the second 1, and so on.
 *
 * The largest CPort id associated with a bundle (defined by a
 * CPort descriptor in the manifest) is used to determine how to
 * encode the device id and module number in UniPro packets
 * that use the bundle.
 *
 * @class: Used by the kernel to identify the functionality provided by the
 * bundle; it is matched against the functionality a greybus driver declares
 * when probing. It should contain one of the values defined in
 * 'enum greybus_class_type'.
 */
struct greybus_descriptor_bundle {
__u8 id; /* interface-relative id (0..) */
__u8 class;
__u8 pad[2];
} __packed;
/*
* A CPort descriptor indicates the id of the bundle within the
* module it's associated with, along with the CPort id used to
* address the CPort. The protocol id defines the format of messages
* exchanged using the CPort.
*/
struct greybus_descriptor_cport {
__le16 id;
__u8 bundle;
__u8 protocol_id; /* enum greybus_protocol */
} __packed;
struct greybus_descriptor_header {
__le16 size;
__u8 type; /* enum greybus_descriptor_type */
__u8 pad;
} __packed;
struct greybus_descriptor {
struct greybus_descriptor_header header;
union {
struct greybus_descriptor_string string;
struct greybus_descriptor_interface interface;
struct greybus_descriptor_bundle bundle;
struct greybus_descriptor_cport cport;
};
} __packed;
struct greybus_manifest_header {
__le16 size;
__u8 version_major;
__u8 version_minor;
} __packed;
struct greybus_manifest {
struct greybus_manifest_header header;
struct greybus_descriptor descriptors[0];
} __packed;
#endif /* __GREYBUS_MANIFEST_H */
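
For illustration only: one reading of the string-descriptor padding rule
above, computing the on-the-wire size (header included) rounded up to a
4-byte multiple with the kernel's ALIGN() macro. The helper is hypothetical.

static size_t gb_example_string_desc_size(u8 string_length)
{
	size_t size;

	size = sizeof(struct greybus_descriptor_header) +
	       sizeof(struct greybus_descriptor_string) + string_length;

	return ALIGN(size, 4);	/* padded with 0x00 bytes if necessary */
}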

include/linux/greybus/greybus_protocols.h Normal file (diff suppressed because it is too large)

include/linux/greybus/hd.h Normal file

@@ -0,0 +1,82 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Greybus Host Device
*
* Copyright 2014-2015 Google Inc.
* Copyright 2014-2015 Linaro Ltd.
*/
#ifndef __HD_H
#define __HD_H
struct gb_host_device;
struct gb_message;
struct gb_hd_driver {
size_t hd_priv_size;
int (*cport_allocate)(struct gb_host_device *hd, int cport_id,
unsigned long flags);
void (*cport_release)(struct gb_host_device *hd, u16 cport_id);
int (*cport_enable)(struct gb_host_device *hd, u16 cport_id,
unsigned long flags);
int (*cport_disable)(struct gb_host_device *hd, u16 cport_id);
int (*cport_connected)(struct gb_host_device *hd, u16 cport_id);
int (*cport_flush)(struct gb_host_device *hd, u16 cport_id);
int (*cport_shutdown)(struct gb_host_device *hd, u16 cport_id,
u8 phase, unsigned int timeout);
int (*cport_quiesce)(struct gb_host_device *hd, u16 cport_id,
size_t peer_space, unsigned int timeout);
int (*cport_clear)(struct gb_host_device *hd, u16 cport_id);
int (*message_send)(struct gb_host_device *hd, u16 dest_cport_id,
struct gb_message *message, gfp_t gfp_mask);
void (*message_cancel)(struct gb_message *message);
int (*latency_tag_enable)(struct gb_host_device *hd, u16 cport_id);
int (*latency_tag_disable)(struct gb_host_device *hd, u16 cport_id);
int (*output)(struct gb_host_device *hd, void *req, u16 size, u8 cmd,
bool async);
};
struct gb_host_device {
struct device dev;
int bus_id;
const struct gb_hd_driver *driver;
struct list_head modules;
struct list_head connections;
struct ida cport_id_map;
/* Number of CPorts supported by the UniPro IP */
size_t num_cports;
/* Host device buffer constraints */
size_t buffer_size_max;
struct gb_svc *svc;
/* Private data for the host driver */
unsigned long hd_priv[0] __aligned(sizeof(s64));
};
#define to_gb_host_device(d) container_of(d, struct gb_host_device, dev)
int gb_hd_cport_reserve(struct gb_host_device *hd, u16 cport_id);
void gb_hd_cport_release_reserved(struct gb_host_device *hd, u16 cport_id);
int gb_hd_cport_allocate(struct gb_host_device *hd, int cport_id,
unsigned long flags);
void gb_hd_cport_release(struct gb_host_device *hd, u16 cport_id);
struct gb_host_device *gb_hd_create(struct gb_hd_driver *driver,
struct device *parent,
size_t buffer_size_max,
size_t num_cports);
int gb_hd_add(struct gb_host_device *hd);
void gb_hd_del(struct gb_host_device *hd);
void gb_hd_shutdown(struct gb_host_device *hd);
void gb_hd_put(struct gb_host_device *hd);
int gb_hd_output(struct gb_host_device *hd, void *req, u16 size, u8 cmd,
bool in_irq);
int gb_hd_init(void);
void gb_hd_exit(void);
#endif /* __HD_H */
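
For illustration only (not part of this commit): a skeletal host driver
registering with the core. All gb_example_* names are hypothetical, the
cport count is arbitrary, and GB_OPERATION_MESSAGE_SIZE_MAX (from
operation.h below) stands in for a transport-specific buffer limit.

struct gb_example_priv {
	void *xprt;	/* transport state, placeholder */
};

static int gb_example_message_send(struct gb_host_device *hd,
				   u16 dest_cport_id,
				   struct gb_message *message, gfp_t gfp_mask)
{
	/* Queue message->buffer (header plus payload) to the transport. */
	return 0;
}

static void gb_example_message_cancel(struct gb_message *message)
{
	/* Abort an in-flight buffer, if the transport supports it. */
}

static struct gb_hd_driver gb_example_hd_driver = {
	.hd_priv_size	= sizeof(struct gb_example_priv),
	.message_send	= gb_example_message_send,
	.message_cancel	= gb_example_message_cancel,
};

static int gb_example_hd_register(struct device *parent)
{
	struct gb_host_device *hd;
	int ret;

	hd = gb_hd_create(&gb_example_hd_driver, parent,
			  GB_OPERATION_MESSAGE_SIZE_MAX, 16 /* num_cports */);
	if (IS_ERR(hd))
		return PTR_ERR(hd);

	ret = gb_hd_add(hd);
	if (ret)
		gb_hd_put(hd);

	return ret;
}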

include/linux/greybus/interface.h Normal file

@@ -0,0 +1,82 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Greybus Interface Block code
*
* Copyright 2014 Google Inc.
* Copyright 2014 Linaro Ltd.
*/
#ifndef __INTERFACE_H
#define __INTERFACE_H
enum gb_interface_type {
GB_INTERFACE_TYPE_INVALID = 0,
GB_INTERFACE_TYPE_UNKNOWN,
GB_INTERFACE_TYPE_DUMMY,
GB_INTERFACE_TYPE_UNIPRO,
GB_INTERFACE_TYPE_GREYBUS,
};
#define GB_INTERFACE_QUIRK_NO_CPORT_FEATURES BIT(0)
#define GB_INTERFACE_QUIRK_NO_INIT_STATUS BIT(1)
#define GB_INTERFACE_QUIRK_NO_GMP_IDS BIT(2)
#define GB_INTERFACE_QUIRK_FORCED_DISABLE BIT(3)
#define GB_INTERFACE_QUIRK_LEGACY_MODE_SWITCH BIT(4)
#define GB_INTERFACE_QUIRK_NO_BUNDLE_ACTIVATE BIT(5)
#define GB_INTERFACE_QUIRK_NO_PM BIT(6)
struct gb_interface {
struct device dev;
struct gb_control *control;
struct list_head bundles;
struct list_head module_node;
struct list_head manifest_descs;
u8 interface_id; /* Physical location within the Endo */
u8 device_id;
u8 features; /* Feature flags set in the manifest */
enum gb_interface_type type;
u32 ddbl1_manufacturer_id;
u32 ddbl1_product_id;
u32 vendor_id;
u32 product_id;
u64 serial_number;
struct gb_host_device *hd;
struct gb_module *module;
unsigned long quirks;
struct mutex mutex;
bool disconnected;
bool ejected;
bool removed;
bool active;
bool enabled;
bool mode_switch;
bool dme_read;
struct work_struct mode_switch_work;
struct completion mode_switch_completion;
};
#define to_gb_interface(d) container_of(d, struct gb_interface, dev)
struct gb_interface *gb_interface_create(struct gb_module *module,
u8 interface_id);
int gb_interface_activate(struct gb_interface *intf);
void gb_interface_deactivate(struct gb_interface *intf);
int gb_interface_enable(struct gb_interface *intf);
void gb_interface_disable(struct gb_interface *intf);
int gb_interface_add(struct gb_interface *intf);
void gb_interface_del(struct gb_interface *intf);
void gb_interface_put(struct gb_interface *intf);
void gb_interface_mailbox_event(struct gb_interface *intf, u16 result,
u32 mailbox);
int gb_interface_request_mode_switch(struct gb_interface *intf);
#endif /* __INTERFACE_H */
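
For illustration only: the bring-up order suggested by this API (register,
activate, enable). The authoritative sequencing, including retries and
interface-type handling, lives in the core's module and interface code.

static int gb_example_intf_bringup(struct gb_interface *intf)
{
	int ret;

	ret = gb_interface_add(intf);		/* register with the driver core */
	if (ret)
		return ret;

	ret = gb_interface_activate(intf);	/* power up, determine type */
	if (ret)
		return ret;

	ret = gb_interface_enable(intf);	/* fetch manifest, create bundles */
	if (ret)
		gb_interface_deactivate(intf);

	return ret;
}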

include/linux/greybus/manifest.h Normal file

@@ -0,0 +1,15 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Greybus manifest parsing
*
* Copyright 2014 Google Inc.
* Copyright 2014 Linaro Ltd.
*/
#ifndef __MANIFEST_H
#define __MANIFEST_H
struct gb_interface;
bool gb_manifest_parse(struct gb_interface *intf, void *data, size_t size);
#endif /* __MANIFEST_H */
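
For illustration only: gb_manifest_parse() returns true on success, so a
caller might wrap it like this hypothetical helper.

static int gb_example_parse_manifest(struct gb_interface *intf,
				     void *data, size_t size)
{
	if (!gb_manifest_parse(intf, data, size))
		return -EINVAL;	/* malformed or inconsistent manifest */

	return 0;
}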

include/linux/greybus/module.h Normal file

@@ -0,0 +1,33 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Greybus Module code
*
* Copyright 2016 Google Inc.
* Copyright 2016 Linaro Ltd.
*/
#ifndef __MODULE_H
#define __MODULE_H
struct gb_module {
struct device dev;
struct gb_host_device *hd;
struct list_head hd_node;
u8 module_id;
size_t num_interfaces;
bool disconnected;
struct gb_interface *interfaces[0];
};
#define to_gb_module(d) container_of(d, struct gb_module, dev)
struct gb_module *gb_module_create(struct gb_host_device *hd, u8 module_id,
size_t num_interfaces);
int gb_module_add(struct gb_module *module);
void gb_module_del(struct gb_module *module);
void gb_module_put(struct gb_module *module);
#endif /* __MODULE_H */
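
For illustration only: a sketch of the hotplug-side pattern, creating a
module with a single interface and registering it, following the usual
create/add/put convention. The helper name is hypothetical.

static struct gb_module *gb_example_module_hotplug(struct gb_host_device *hd,
						   u8 module_id)
{
	struct gb_module *module;

	module = gb_module_create(hd, module_id, 1 /* num_interfaces */);
	if (!module)
		return NULL;

	if (gb_module_add(module)) {
		gb_module_put(module);
		return NULL;
	}

	return module;
}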

include/linux/greybus/operation.h Normal file

@@ -0,0 +1,224 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Greybus operations
*
* Copyright 2014 Google Inc.
* Copyright 2014 Linaro Ltd.
*/
#ifndef __OPERATION_H
#define __OPERATION_H
#include <linux/completion.h>
struct gb_operation;
/* The default amount of time a request is given to complete */
#define GB_OPERATION_TIMEOUT_DEFAULT 1000 /* milliseconds */
/*
* The top bit of the type in an operation message header indicates
* whether the message is a request (bit clear) or response (bit set)
*/
#define GB_MESSAGE_TYPE_RESPONSE ((u8)0x80)
enum gb_operation_result {
GB_OP_SUCCESS = 0x00,
GB_OP_INTERRUPTED = 0x01,
GB_OP_TIMEOUT = 0x02,
GB_OP_NO_MEMORY = 0x03,
GB_OP_PROTOCOL_BAD = 0x04,
GB_OP_OVERFLOW = 0x05,
GB_OP_INVALID = 0x06,
GB_OP_RETRY = 0x07,
GB_OP_NONEXISTENT = 0x08,
GB_OP_UNKNOWN_ERROR = 0xfe,
GB_OP_MALFUNCTION = 0xff,
};
#define GB_OPERATION_MESSAGE_SIZE_MIN sizeof(struct gb_operation_msg_hdr)
#define GB_OPERATION_MESSAGE_SIZE_MAX U16_MAX
/*
* Protocol code should only examine the payload and payload_size fields, and
* host-controller drivers may use the hcpriv field. All other fields are
* intended to be private to the operations core code.
*/
struct gb_message {
struct gb_operation *operation;
struct gb_operation_msg_hdr *header;
void *payload;
size_t payload_size;
void *buffer;
void *hcpriv;
};
#define GB_OPERATION_FLAG_INCOMING BIT(0)
#define GB_OPERATION_FLAG_UNIDIRECTIONAL BIT(1)
#define GB_OPERATION_FLAG_SHORT_RESPONSE BIT(2)
#define GB_OPERATION_FLAG_CORE BIT(3)
#define GB_OPERATION_FLAG_USER_MASK (GB_OPERATION_FLAG_SHORT_RESPONSE | \
GB_OPERATION_FLAG_UNIDIRECTIONAL)
/*
* A Greybus operation is a remote procedure call performed over a
* connection between two UniPro interfaces.
*
* Every operation consists of a request message sent to the other
* end of the connection coupled with a reply message returned to
* the sender. Every operation has a type, whose interpretation is
* dependent on the protocol associated with the connection.
*
* Only four things in an operation structure are intended to be
* directly usable by protocol handlers: the operation's connection
* pointer; the operation type; the request message payload (and
* size); and the response message payload (and size). Note that a
* message with a 0-byte payload has a null message payload pointer.
*
* In addition, every operation has a result, which is an errno
* value. Protocol handlers access the operation result using
* gb_operation_result().
*/
typedef void (*gb_operation_callback)(struct gb_operation *);
struct gb_operation {
struct gb_connection *connection;
struct gb_message *request;
struct gb_message *response;
unsigned long flags;
u8 type;
u16 id;
int errno; /* Operation result */
struct work_struct work;
gb_operation_callback callback;
struct completion completion;
struct timer_list timer;
struct kref kref;
atomic_t waiters;
int active;
struct list_head links; /* connection->operations */
void *private;
};
static inline bool
gb_operation_is_incoming(struct gb_operation *operation)
{
return operation->flags & GB_OPERATION_FLAG_INCOMING;
}
static inline bool
gb_operation_is_unidirectional(struct gb_operation *operation)
{
return operation->flags & GB_OPERATION_FLAG_UNIDIRECTIONAL;
}
static inline bool
gb_operation_short_response_allowed(struct gb_operation *operation)
{
return operation->flags & GB_OPERATION_FLAG_SHORT_RESPONSE;
}
static inline bool gb_operation_is_core(struct gb_operation *operation)
{
return operation->flags & GB_OPERATION_FLAG_CORE;
}
void gb_connection_recv(struct gb_connection *connection,
void *data, size_t size);
int gb_operation_result(struct gb_operation *operation);
size_t gb_operation_get_payload_size_max(struct gb_connection *connection);
struct gb_operation *
gb_operation_create_flags(struct gb_connection *connection,
u8 type, size_t request_size,
size_t response_size, unsigned long flags,
gfp_t gfp);
static inline struct gb_operation *
gb_operation_create(struct gb_connection *connection,
u8 type, size_t request_size,
size_t response_size, gfp_t gfp)
{
return gb_operation_create_flags(connection, type, request_size,
response_size, 0, gfp);
}
struct gb_operation *
gb_operation_create_core(struct gb_connection *connection,
u8 type, size_t request_size,
size_t response_size, unsigned long flags,
gfp_t gfp);
void gb_operation_get(struct gb_operation *operation);
void gb_operation_put(struct gb_operation *operation);
bool gb_operation_response_alloc(struct gb_operation *operation,
size_t response_size, gfp_t gfp);
int gb_operation_request_send(struct gb_operation *operation,
gb_operation_callback callback,
unsigned int timeout,
gfp_t gfp);
int gb_operation_request_send_sync_timeout(struct gb_operation *operation,
unsigned int timeout);
static inline int
gb_operation_request_send_sync(struct gb_operation *operation)
{
return gb_operation_request_send_sync_timeout(operation,
GB_OPERATION_TIMEOUT_DEFAULT);
}
void gb_operation_cancel(struct gb_operation *operation, int errno);
void gb_operation_cancel_incoming(struct gb_operation *operation, int errno);
void greybus_message_sent(struct gb_host_device *hd,
struct gb_message *message, int status);
int gb_operation_sync_timeout(struct gb_connection *connection, int type,
void *request, int request_size,
void *response, int response_size,
unsigned int timeout);
int gb_operation_unidirectional_timeout(struct gb_connection *connection,
int type, void *request, int request_size,
unsigned int timeout);
static inline int gb_operation_sync(struct gb_connection *connection, int type,
void *request, int request_size,
void *response, int response_size)
{
return gb_operation_sync_timeout(connection, type,
request, request_size, response, response_size,
GB_OPERATION_TIMEOUT_DEFAULT);
}
static inline int gb_operation_unidirectional(struct gb_connection *connection,
int type, void *request, int request_size)
{
return gb_operation_unidirectional_timeout(connection, type,
request, request_size, GB_OPERATION_TIMEOUT_DEFAULT);
}
static inline void *gb_operation_get_data(struct gb_operation *operation)
{
return operation->private;
}
static inline void gb_operation_set_data(struct gb_operation *operation,
void *data)
{
operation->private = data;
}
int gb_operation_init(void);
void gb_operation_exit(void);
#endif /* !__OPERATION_H */
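
For illustration only (not part of this commit): the canonical driver-side
use of gb_operation_sync(). The request/response structs and operation type
0x42 are hypothetical; real protocol definitions live in greybus_protocols.h.

struct gb_example_get_value_request {
	__u8	which;
} __packed;

struct gb_example_get_value_response {
	__le32	value;
} __packed;

static int gb_example_get_value(struct gb_connection *connection,
				u8 which, u32 *value)
{
	struct gb_example_get_value_request request;
	struct gb_example_get_value_response response;
	int ret;

	request.which = which;

	ret = gb_operation_sync(connection, 0x42 /* hypothetical type */,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret)
		return ret;

	*value = le32_to_cpu(response.value);

	return 0;
}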

include/linux/greybus/svc.h Normal file

@@ -0,0 +1,101 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Greybus SVC code
*
* Copyright 2015 Google Inc.
* Copyright 2015 Linaro Ltd.
*/
#ifndef __SVC_H
#define __SVC_H
#define GB_SVC_CPORT_FLAG_E2EFC BIT(0)
#define GB_SVC_CPORT_FLAG_CSD_N BIT(1)
#define GB_SVC_CPORT_FLAG_CSV_N BIT(2)
enum gb_svc_state {
GB_SVC_STATE_RESET,
GB_SVC_STATE_PROTOCOL_VERSION,
GB_SVC_STATE_SVC_HELLO,
};
enum gb_svc_watchdog_bite {
GB_SVC_WATCHDOG_BITE_RESET_UNIPRO = 0,
GB_SVC_WATCHDOG_BITE_PANIC_KERNEL,
};
struct gb_svc_watchdog;
struct svc_debugfs_pwrmon_rail {
u8 id;
struct gb_svc *svc;
};
struct gb_svc {
struct device dev;
struct gb_host_device *hd;
struct gb_connection *connection;
enum gb_svc_state state;
struct ida device_id_map;
struct workqueue_struct *wq;
u16 endo_id;
u8 ap_intf_id;
u8 protocol_major;
u8 protocol_minor;
struct gb_svc_watchdog *watchdog;
enum gb_svc_watchdog_bite action;
struct dentry *debugfs_dentry;
struct svc_debugfs_pwrmon_rail *pwrmon_rails;
};
#define to_gb_svc(d) container_of(d, struct gb_svc, dev)
struct gb_svc *gb_svc_create(struct gb_host_device *hd);
int gb_svc_add(struct gb_svc *svc);
void gb_svc_del(struct gb_svc *svc);
void gb_svc_put(struct gb_svc *svc);
int gb_svc_pwrmon_intf_sample_get(struct gb_svc *svc, u8 intf_id,
u8 measurement_type, u32 *value);
int gb_svc_intf_device_id(struct gb_svc *svc, u8 intf_id, u8 device_id);
int gb_svc_route_create(struct gb_svc *svc, u8 intf1_id, u8 dev1_id,
u8 intf2_id, u8 dev2_id);
void gb_svc_route_destroy(struct gb_svc *svc, u8 intf1_id, u8 intf2_id);
int gb_svc_connection_create(struct gb_svc *svc, u8 intf1_id, u16 cport1_id,
u8 intf2_id, u16 cport2_id, u8 cport_flags);
void gb_svc_connection_destroy(struct gb_svc *svc, u8 intf1_id, u16 cport1_id,
u8 intf2_id, u16 cport2_id);
int gb_svc_intf_eject(struct gb_svc *svc, u8 intf_id);
int gb_svc_intf_vsys_set(struct gb_svc *svc, u8 intf_id, bool enable);
int gb_svc_intf_refclk_set(struct gb_svc *svc, u8 intf_id, bool enable);
int gb_svc_intf_unipro_set(struct gb_svc *svc, u8 intf_id, bool enable);
int gb_svc_intf_activate(struct gb_svc *svc, u8 intf_id, u8 *intf_type);
int gb_svc_intf_resume(struct gb_svc *svc, u8 intf_id);
int gb_svc_dme_peer_get(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
u32 *value);
int gb_svc_dme_peer_set(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
u32 value);
int gb_svc_intf_set_power_mode(struct gb_svc *svc, u8 intf_id, u8 hs_series,
u8 tx_mode, u8 tx_gear, u8 tx_nlanes,
u8 tx_amplitude, u8 tx_hs_equalizer,
u8 rx_mode, u8 rx_gear, u8 rx_nlanes,
u8 flags, u32 quirks,
struct gb_svc_l2_timer_cfg *local,
struct gb_svc_l2_timer_cfg *remote);
int gb_svc_intf_set_power_mode_hibernate(struct gb_svc *svc, u8 intf_id);
int gb_svc_ping(struct gb_svc *svc);
int gb_svc_watchdog_create(struct gb_svc *svc);
void gb_svc_watchdog_destroy(struct gb_svc *svc);
bool gb_svc_watchdog_enabled(struct gb_svc *svc);
int gb_svc_watchdog_enable(struct gb_svc *svc);
int gb_svc_watchdog_disable(struct gb_svc *svc);
int gb_svc_protocol_init(void);
void gb_svc_protocol_exit(void);
#endif /* __SVC_H */
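
For illustration only: how the core might ask the SVC to wire an AP CPort to
an interface CPort. The helper name is hypothetical and the flag combination
is a plausible default, not a statement of what the core actually uses.

static int gb_example_svc_connect(struct gb_svc *svc, u8 intf_id,
				  u16 cport_id, u16 ap_cport_id)
{
	u8 cport_flags = GB_SVC_CPORT_FLAG_E2EFC | GB_SVC_CPORT_FLAG_CSV_N;

	return gb_svc_connection_create(svc, svc->ap_intf_id, ap_cport_id,
					intf_id, cport_id, cport_flags);
}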