Merge tag 'usb-4.18-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb

Pull USB and PHY updates from Greg KH:
 "Here is the big USB pull request for 4.18-rc1.

  Lots of stuff here, the highlights are:

   - phy driver updates and new additions

   - usual set of xhci driver updates

   - normal set of musb updates

   - gadget driver updates and new controllers

   - typec work; it's getting closer to moving fully out of the staging
     portion of the tree.

   - lots of minor cleanups and bugfixes.

  All of these have been in linux-next for a while with no reported
  issues"

* tag 'usb-4.18-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb: (263 commits)
  Revert "xhci: Reset Renesas uPD72020x USB controller for 32-bit DMA issue"
  xhci: Add quirk to zero 64bit registers on Renesas PCIe controllers
  xhci: Allow more than 32 quirks
  usb: xhci: force all memory allocations to node
  selftests: add test for USB over IP driver
  USB: typec: fsusb302: no need to check return value of debugfs_create_dir()
  USB: gadget: udc: s3c2410_udc: no need to check return value of debugfs_create functions
  USB: gadget: udc: renesas_usb3: no need to check return value of debugfs_create functions
  USB: gadget: udc: pxa27x_udc: no need to check return value of debugfs_create functions
  USB: gadget: udc: gr_udc: no need to check return value of debugfs_create functions
  USB: gadget: udc: bcm63xx_udc: no need to check return value of debugfs_create functions
  USB: udc: atmel_usba_udc: no need to check return value of debugfs_create functions
  USB: dwc3: no need to check return value of debugfs_create functions
  USB: dwc2: no need to check return value of debugfs_create functions
  USB: core: no need to check return value of debugfs_create functions
  USB: chipidea: no need to check return value of debugfs_create functions
  USB: ehci-hcd: no need to check return value of debugfs_create functions
  USB: fhci-hcd: no need to check return value of debugfs_create functions
  USB: fotg210-hcd: no need to check return value of debugfs_create functions
  USB: imx21-hcd: no need to check return value of debugfs_create functions
  ...
Committed by Linus Torvalds, 2018-06-05 16:14:12 -07:00
211 changed files with 11440 additions and 3522 deletions
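
A large block of the commits above applies the same debugfs cleanup that shows
up in the chipidea, dwc2 and USB core diffs below: because a failed
debugfs_create_dir()/debugfs_create_file() call only means a debug entry is
missing, the setup helpers stop checking return values, become void, and tear
everything down with a single debugfs_remove_recursive(). A minimal sketch of
the pattern, using hypothetical my_drv names that are not from the tree:

#include <linux/debugfs.h>
#include <linux/fs.h>

struct my_drv {
	struct dentry *debugfs;		/* root of this device's debug entries */
};

/* New style: create the entries and ignore the returned dentries; if debugfs
 * is disabled or an entry fails, the driver simply has no debug files. */
static void my_drv_debugfs_init(struct my_drv *drv, const char *name,
				const struct file_operations *fops)
{
	drv->debugfs = debugfs_create_dir(name, NULL);
	debugfs_create_file("state", 0444, drv->debugfs, drv, fops);
}

static void my_drv_debugfs_exit(struct my_drv *drv)
{
	/* Recursive removal cleans up the directory and every file in it. */
	debugfs_remove_recursive(drv->debugfs);
}

This mirrors what dbg_create_files() in the chipidea diff and
dwc2_debugfs_init() further below now do.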

drivers/usb/chipidea/ci.h

@@ -450,7 +450,7 @@ void hw_phymode_configure(struct ci_hdrc *ci);
void ci_platform_configure(struct ci_hdrc *ci);
int dbg_create_files(struct ci_hdrc *ci);
void dbg_create_files(struct ci_hdrc *ci);
void dbg_remove_files(struct ci_hdrc *ci);
#endif /* __DRIVERS_USB_CHIPIDEA_CI_H */

drivers/usb/chipidea/ci_hdrc_imx.c

@@ -291,7 +291,8 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
pdata.usb_phy = data->phy;
if (of_device_is_compatible(np, "fsl,imx53-usb") && pdata.usb_phy &&
if ((of_device_is_compatible(np, "fsl,imx53-usb") ||
of_device_is_compatible(np, "fsl,imx51-usb")) && pdata.usb_phy &&
of_usb_get_phy_mode(np) == USBPHY_INTERFACE_MODE_ULPI) {
pdata.flags |= CI_HDRC_OVERRIDE_PHY_CONTROL;
data->override_phy_control = true;

drivers/usb/chipidea/core.c

@@ -1062,9 +1062,7 @@ static int ci_hdrc_probe(struct platform_device *pdev)
ci_hdrc_otg_fsm_start(ci);
device_set_wakeup_capable(&pdev->dev, true);
ret = dbg_create_files(ci);
if (ret)
goto stop;
dbg_create_files(ci);
ret = sysfs_create_group(&dev->kobj, &ci_attr_group);
if (ret)

drivers/usb/chipidea/debug.c

@@ -340,54 +340,28 @@ DEFINE_SHOW_ATTRIBUTE(ci_registers);
*
* This function returns an error code
*/
int dbg_create_files(struct ci_hdrc *ci)
void dbg_create_files(struct ci_hdrc *ci)
{
struct dentry *dent;
ci->debugfs = debugfs_create_dir(dev_name(ci->dev), NULL);
if (!ci->debugfs)
return -ENOMEM;
dent = debugfs_create_file("device", S_IRUGO, ci->debugfs, ci,
&ci_device_fops);
if (!dent)
goto err;
dent = debugfs_create_file("port_test", S_IRUGO | S_IWUSR, ci->debugfs,
ci, &ci_port_test_fops);
if (!dent)
goto err;
dent = debugfs_create_file("qheads", S_IRUGO, ci->debugfs, ci,
&ci_qheads_fops);
if (!dent)
goto err;
dent = debugfs_create_file("requests", S_IRUGO, ci->debugfs, ci,
&ci_requests_fops);
if (!dent)
goto err;
debugfs_create_file("device", S_IRUGO, ci->debugfs, ci,
&ci_device_fops);
debugfs_create_file("port_test", S_IRUGO | S_IWUSR, ci->debugfs, ci,
&ci_port_test_fops);
debugfs_create_file("qheads", S_IRUGO, ci->debugfs, ci,
&ci_qheads_fops);
debugfs_create_file("requests", S_IRUGO, ci->debugfs, ci,
&ci_requests_fops);
if (ci_otg_is_fsm_mode(ci)) {
dent = debugfs_create_file("otg", S_IRUGO, ci->debugfs, ci,
&ci_otg_fops);
if (!dent)
goto err;
debugfs_create_file("otg", S_IRUGO, ci->debugfs, ci,
&ci_otg_fops);
}
dent = debugfs_create_file("role", S_IRUGO | S_IWUSR, ci->debugfs, ci,
&ci_role_fops);
if (!dent)
goto err;
dent = debugfs_create_file("registers", S_IRUGO, ci->debugfs, ci,
&ci_registers_fops);
if (dent)
return 0;
err:
debugfs_remove_recursive(ci->debugfs);
return -ENOMEM;
debugfs_create_file("role", S_IRUGO | S_IWUSR, ci->debugfs, ci,
&ci_role_fops);
debugfs_create_file("registers", S_IRUGO, ci->debugfs, ci,
&ci_registers_fops);
}
/**

drivers/usb/class/usbtmc.c

@@ -21,7 +21,6 @@
#include <linux/usb/tmc.h>
#define RIGOL 1
#define USBTMC_HEADER_SIZE 12
#define USBTMC_MINOR_BASE 176
@@ -93,8 +92,6 @@ struct usbtmc_device_data {
/* coalesced usb488_caps from usbtmc_dev_capabilities */
__u8 usb488_caps;
u8 rigol_quirk;
/* attributes from the USB TMC spec for this device */
u8 TermChar;
bool TermCharEnabled;
@@ -110,17 +107,6 @@ struct usbtmc_device_data {
};
#define to_usbtmc_data(d) container_of(d, struct usbtmc_device_data, kref)
struct usbtmc_ID_rigol_quirk {
__u16 idVendor;
__u16 idProduct;
};
static const struct usbtmc_ID_rigol_quirk usbtmc_id_quirk[] = {
{ 0x1ab1, 0x0588 },
{ 0x1ab1, 0x04b0 },
{ 0, 0 }
};
/* Forward declarations */
static struct usb_driver usbtmc_driver;
@@ -603,16 +589,14 @@ static ssize_t usbtmc_read(struct file *filp, char __user *buf,
goto exit;
}
if (data->rigol_quirk) {
dev_dbg(dev, "usb_bulk_msg_in: count(%zu)\n", count);
dev_dbg(dev, "usb_bulk_msg_in: count(%zu)\n", count);
retval = send_request_dev_dep_msg_in(data, count);
retval = send_request_dev_dep_msg_in(data, count);
if (retval < 0) {
if (data->auto_abort)
usbtmc_ioctl_abort_bulk_out(data);
goto exit;
}
if (retval < 0) {
if (data->auto_abort)
usbtmc_ioctl_abort_bulk_out(data);
goto exit;
}
/* Loop until we have fetched everything we requested */
@@ -621,23 +605,6 @@ static ssize_t usbtmc_read(struct file *filp, char __user *buf,
done = 0;
while (remaining > 0) {
if (!data->rigol_quirk) {
dev_dbg(dev, "usb_bulk_msg_in: remaining(%zu), count(%zu)\n", remaining, count);
if (remaining > USBTMC_SIZE_IOBUFFER - USBTMC_HEADER_SIZE - 3)
this_part = USBTMC_SIZE_IOBUFFER - USBTMC_HEADER_SIZE - 3;
else
this_part = remaining;
retval = send_request_dev_dep_msg_in(data, this_part);
if (retval < 0) {
dev_err(dev, "usb_bulk_msg returned %d\n", retval);
if (data->auto_abort)
usbtmc_ioctl_abort_bulk_out(data);
goto exit;
}
}
/* Send bulk URB */
retval = usb_bulk_msg(data->usb_dev,
usb_rcvbulkpipe(data->usb_dev,
@@ -658,7 +625,7 @@ static ssize_t usbtmc_read(struct file *filp, char __user *buf,
}
/* Parse header in first packet */
if ((done == 0) || !data->rigol_quirk) {
if (done == 0) {
/* Sanity checks for the header */
if (actual < USBTMC_HEADER_SIZE) {
dev_err(dev, "Device sent too small first packet: %u < %u\n", actual, USBTMC_HEADER_SIZE);
@@ -698,20 +665,11 @@ static ssize_t usbtmc_read(struct file *filp, char __user *buf,
actual -= USBTMC_HEADER_SIZE;
/* Check if the message is smaller than requested */
if (data->rigol_quirk) {
if (remaining > n_characters)
remaining = n_characters;
/* Remove padding if it exists */
if (actual > remaining)
actual = remaining;
}
else {
if (this_part > n_characters)
this_part = n_characters;
/* Remove padding if it exists */
if (actual > this_part)
actual = this_part;
}
if (remaining > n_characters)
remaining = n_characters;
/* Remove padding if it exists */
if (actual > remaining)
actual = remaining;
dev_dbg(dev, "Bulk-IN header: N_characters(%u), bTransAttr(%u)\n", n_characters, buffer[8]);
@@ -1365,7 +1323,6 @@ static int usbtmc_probe(struct usb_interface *intf,
struct usbtmc_device_data *data;
struct usb_host_interface *iface_desc;
struct usb_endpoint_descriptor *bulk_in, *bulk_out, *int_in;
int n;
int retcode;
dev_dbg(&intf->dev, "%s called\n", __func__);
@@ -1385,20 +1342,6 @@ static int usbtmc_probe(struct usb_interface *intf,
atomic_set(&data->srq_asserted, 0);
data->zombie = 0;
/* Determine if it is a Rigol or not */
data->rigol_quirk = 0;
dev_dbg(&intf->dev, "Trying to find if device Vendor 0x%04X Product 0x%04X has the RIGOL quirk\n",
le16_to_cpu(data->usb_dev->descriptor.idVendor),
le16_to_cpu(data->usb_dev->descriptor.idProduct));
for(n = 0; usbtmc_id_quirk[n].idVendor > 0; n++) {
if ((usbtmc_id_quirk[n].idVendor == le16_to_cpu(data->usb_dev->descriptor.idVendor)) &&
(usbtmc_id_quirk[n].idProduct == le16_to_cpu(data->usb_dev->descriptor.idProduct))) {
dev_dbg(&intf->dev, "Setting this device as having the RIGOL quirk\n");
data->rigol_quirk = 1;
break;
}
}
/* Initialize USBTMC bTag and other fields */
data->bTag = 1;
data->TermCharEnabled = 0;

drivers/usb/core/hcd.c

@@ -33,7 +33,6 @@
#include <linux/phy/phy.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/usb/phy.h>
#include <linux/usb/otg.h>
#include "usb.h"
@@ -568,6 +567,7 @@ static int rh_call_control (struct usb_hcd *hcd, struct urb *urb)
switch (wValue & 0xff00) {
case USB_DT_DEVICE << 8:
switch (hcd->speed) {
case HCD_USB32:
case HCD_USB31:
bufp = usb31_rh_dev_descriptor;
break;
@@ -592,6 +592,7 @@ static int rh_call_control (struct usb_hcd *hcd, struct urb *urb)
break;
case USB_DT_CONFIG << 8:
switch (hcd->speed) {
case HCD_USB32:
case HCD_USB31:
case HCD_USB3:
bufp = ss_rh_config_descriptor;
@@ -2742,34 +2743,14 @@ int usb_add_hcd(struct usb_hcd *hcd,
int retval;
struct usb_device *rhdev;
if (IS_ENABLED(CONFIG_USB_PHY) && !hcd->skip_phy_initialization) {
struct usb_phy *phy = usb_get_phy_dev(hcd->self.sysdev, 0);
if (IS_ERR(phy)) {
retval = PTR_ERR(phy);
if (retval == -EPROBE_DEFER)
return retval;
} else {
retval = usb_phy_init(phy);
if (retval) {
usb_put_phy(phy);
return retval;
}
hcd->usb_phy = phy;
hcd->remove_phy = 1;
}
}
if (!hcd->skip_phy_initialization && usb_hcd_is_primary_hcd(hcd)) {
hcd->phy_roothub = usb_phy_roothub_alloc(hcd->self.sysdev);
if (IS_ERR(hcd->phy_roothub)) {
retval = PTR_ERR(hcd->phy_roothub);
goto err_phy_roothub_alloc;
}
if (IS_ERR(hcd->phy_roothub))
return PTR_ERR(hcd->phy_roothub);
retval = usb_phy_roothub_init(hcd->phy_roothub);
if (retval)
goto err_phy_roothub_alloc;
return retval;
retval = usb_phy_roothub_power_on(hcd->phy_roothub);
if (retval)
@@ -2819,6 +2800,9 @@ int usb_add_hcd(struct usb_hcd *hcd,
hcd->self.root_hub = rhdev;
mutex_unlock(&usb_port_peer_mutex);
rhdev->rx_lanes = 1;
rhdev->tx_lanes = 1;
switch (hcd->speed) {
case HCD_USB11:
rhdev->speed = USB_SPEED_FULL;
@@ -2832,6 +2816,10 @@ int usb_add_hcd(struct usb_hcd *hcd,
case HCD_USB3:
rhdev->speed = USB_SPEED_SUPER;
break;
case HCD_USB32:
rhdev->rx_lanes = 2;
rhdev->tx_lanes = 2;
/* fall through */
case HCD_USB31:
rhdev->speed = USB_SPEED_SUPER_PLUS;
break;
@@ -2943,12 +2931,7 @@ err_create_buf:
usb_phy_roothub_power_off(hcd->phy_roothub);
err_usb_phy_roothub_power_on:
usb_phy_roothub_exit(hcd->phy_roothub);
err_phy_roothub_alloc:
if (hcd->remove_phy && hcd->usb_phy) {
usb_phy_shutdown(hcd->usb_phy);
usb_put_phy(hcd->usb_phy);
hcd->usb_phy = NULL;
}
return retval;
}
EXPORT_SYMBOL_GPL(usb_add_hcd);
@@ -3024,12 +3007,6 @@ void usb_remove_hcd(struct usb_hcd *hcd)
usb_phy_roothub_power_off(hcd->phy_roothub);
usb_phy_roothub_exit(hcd->phy_roothub);
if (hcd->remove_phy && hcd->usb_phy) {
usb_phy_shutdown(hcd->usb_phy);
usb_put_phy(hcd->usb_phy);
hcd->usb_phy = NULL;
}
usb_put_invalidate_rhdev(hcd);
hcd->flags = 0;
}

drivers/usb/core/hub.c

@@ -2636,7 +2636,7 @@ static unsigned hub_is_wusb(struct usb_hub *hub)
#define SET_ADDRESS_TRIES 2
#define GET_DESCRIPTOR_TRIES 2
#define SET_CONFIG_TRIES (2 * (use_both_schemes + 1))
#define USE_NEW_SCHEME(i) ((i) / 2 == (int)old_scheme_first)
#define USE_NEW_SCHEME(i, scheme) ((i) / 2 == (int)scheme)
#define HUB_ROOT_RESET_TIME 60 /* times are in msec */
#define HUB_SHORT_RESET_TIME 10
@@ -2651,12 +2651,16 @@ static unsigned hub_is_wusb(struct usb_hub *hub)
* enumeration failures, so disable this enumeration scheme for USB3
* devices.
*/
static bool use_new_scheme(struct usb_device *udev, int retry)
static bool use_new_scheme(struct usb_device *udev, int retry,
struct usb_port *port_dev)
{
int old_scheme_first_port =
port_dev->quirks & USB_PORT_QUIRK_OLD_SCHEME;
if (udev->speed >= USB_SPEED_SUPER)
return false;
return USE_NEW_SCHEME(retry);
return USE_NEW_SCHEME(retry, old_scheme_first_port || old_scheme_first);
}
/* Is a USB 3.0 port in the Inactive or Compliance Mode state?
@@ -2751,6 +2755,14 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
if (!udev)
return 0;
if (hub_is_superspeedplus(hub->hdev)) {
/* extended portstatus Rx and Tx lane count are zero based */
udev->rx_lanes = USB_EXT_PORT_RX_LANES(ext_portstatus) + 1;
udev->tx_lanes = USB_EXT_PORT_TX_LANES(ext_portstatus) + 1;
} else {
udev->rx_lanes = 1;
udev->tx_lanes = 1;
}
if (hub_is_wusb(hub))
udev->speed = USB_SPEED_WIRELESS;
else if (hub_is_superspeedplus(hub->hdev) &&
@@ -2867,7 +2879,11 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
done:
if (status == 0) {
/* TRSTRCY = 10 ms; plus some extra */
msleep(10 + 40);
if (port_dev->quirks & USB_PORT_QUIRK_FAST_ENUM)
usleep_range(10000, 12000);
else
msleep(10 + 40);
if (udev) {
struct usb_hcd *hcd = bus_to_hcd(udev->bus);
@@ -3376,6 +3392,10 @@ static int wait_for_connected(struct usb_device *udev,
while (delay_ms < 2000) {
if (status || *portstatus & USB_PORT_STAT_CONNECTION)
break;
if (!port_is_power_on(hub, *portstatus)) {
status = -ENODEV;
break;
}
msleep(20);
delay_ms += 20;
status = hub_port_status(hub, *port1, portstatus, portchange);
@@ -4380,6 +4400,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
{
struct usb_device *hdev = hub->hdev;
struct usb_hcd *hcd = bus_to_hcd(hdev->bus);
struct usb_port *port_dev = hub->ports[port1 - 1];
int retries, operations, retval, i;
unsigned delay = HUB_SHORT_RESET_TIME;
enum usb_device_speed oldspeed = udev->speed;
@@ -4501,7 +4522,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
for (retries = 0; retries < GET_DESCRIPTOR_TRIES; (++retries, msleep(100))) {
bool did_new_scheme = false;
if (use_new_scheme(udev, retry_counter)) {
if (use_new_scheme(udev, retry_counter, port_dev)) {
struct usb_device_descriptor *buf;
int r = 0;
@@ -4551,7 +4572,9 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
* reset. But only on the first attempt,
* lest we get into a time out/reset loop
*/
if (r == 0 || (r == -ETIMEDOUT && retries == 0))
if (r == 0 || (r == -ETIMEDOUT &&
retries == 0 &&
udev->speed > USB_SPEED_FULL))
break;
}
udev->descriptor.bMaxPacketSize0 =
@@ -4598,9 +4621,12 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
if (udev->speed >= USB_SPEED_SUPER) {
devnum = udev->devnum;
dev_info(&udev->dev,
"%s SuperSpeed%s USB device number %d using %s\n",
"%s SuperSpeed%s%s USB device number %d using %s\n",
(udev->config) ? "reset" : "new",
(udev->speed == USB_SPEED_SUPER_PLUS) ? "Plus" : "",
(udev->speed == USB_SPEED_SUPER_PLUS) ?
"Plus Gen 2" : " Gen 1",
(udev->rx_lanes == 2 && udev->tx_lanes == 2) ?
"x2" : "",
devnum, driver_name);
}

drivers/usb/core/hub.h

@@ -98,6 +98,7 @@ struct usb_port {
struct mutex status_lock;
u32 over_current_count;
u8 portnum;
u32 quirks;
unsigned int is_superspeed:1;
unsigned int usb3_lpm_u1_permit:1;
unsigned int usb3_lpm_u2_permit:1;

drivers/usb/core/message.c

@@ -940,7 +940,7 @@ int usb_set_isoch_delay(struct usb_device *dev)
return usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
USB_REQ_SET_ISOCH_DELAY,
USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
cpu_to_le16(dev->hub_delay), 0, NULL, 0,
dev->hub_delay, 0, NULL, 0,
USB_CTRL_SET_TIMEOUT);
}

drivers/usb/core/port.c

@@ -50,6 +50,28 @@ static ssize_t over_current_count_show(struct device *dev,
}
static DEVICE_ATTR_RO(over_current_count);
static ssize_t quirks_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_port *port_dev = to_usb_port(dev);
return sprintf(buf, "%08x\n", port_dev->quirks);
}
static ssize_t quirks_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct usb_port *port_dev = to_usb_port(dev);
u32 value;
if (kstrtou32(buf, 16, &value))
return -EINVAL;
port_dev->quirks = value;
return count;
}
static DEVICE_ATTR_RW(quirks);
static ssize_t usb3_lpm_permit_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -118,6 +140,7 @@ static DEVICE_ATTR_RW(usb3_lpm_permit);
static struct attribute *port_dev_attrs[] = {
&dev_attr_connect_type.attr,
&dev_attr_quirks.attr,
&dev_attr_over_current_count.attr,
NULL,
};

drivers/usb/core/sysfs.c

@@ -175,6 +175,26 @@ static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RO(speed);
static ssize_t rx_lanes_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct usb_device *udev;
udev = to_usb_device(dev);
return sprintf(buf, "%d\n", udev->rx_lanes);
}
static DEVICE_ATTR_RO(rx_lanes);
static ssize_t tx_lanes_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct usb_device *udev;
udev = to_usb_device(dev);
return sprintf(buf, "%d\n", udev->tx_lanes);
}
static DEVICE_ATTR_RO(tx_lanes);
static ssize_t busnum_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -790,6 +810,8 @@ static struct attribute *dev_attrs[] = {
&dev_attr_bNumConfigurations.attr,
&dev_attr_bMaxPacketSize0.attr,
&dev_attr_speed.attr,
&dev_attr_rx_lanes.attr,
&dev_attr_tx_lanes.attr,
&dev_attr_busnum.attr,
&dev_attr_devnum.attr,
&dev_attr_devpath.attr,

drivers/usb/core/usb.c

@@ -1167,30 +1167,16 @@ static struct notifier_block usb_bus_nb = {
struct dentry *usb_debug_root;
EXPORT_SYMBOL_GPL(usb_debug_root);
static struct dentry *usb_debug_devices;
static int usb_debugfs_init(void)
static void usb_debugfs_init(void)
{
usb_debug_root = debugfs_create_dir("usb", NULL);
if (!usb_debug_root)
return -ENOENT;
usb_debug_devices = debugfs_create_file("devices", 0444,
usb_debug_root, NULL,
&usbfs_devices_fops);
if (!usb_debug_devices) {
debugfs_remove(usb_debug_root);
usb_debug_root = NULL;
return -ENOENT;
}
return 0;
debugfs_create_file("devices", 0444, usb_debug_root, NULL,
&usbfs_devices_fops);
}
static void usb_debugfs_cleanup(void)
{
debugfs_remove(usb_debug_devices);
debugfs_remove(usb_debug_root);
debugfs_remove_recursive(usb_debug_root);
}
/*
@@ -1205,9 +1191,7 @@ static int __init usb_init(void)
}
usb_init_pool_max();
retval = usb_debugfs_init();
if (retval)
goto out;
usb_debugfs_init();
usb_acpi_register();
retval = bus_register(&usb_bus_type);

drivers/usb/dwc2/core.c

@@ -419,6 +419,8 @@ static void dwc2_wait_for_mode(struct dwc2_hsotg *hsotg,
/**
* dwc2_iddig_filter_enabled() - Returns true if the IDDIG debounce
* filter is enabled.
*
* @hsotg: Programming view of DWC_otg controller
*/
static bool dwc2_iddig_filter_enabled(struct dwc2_hsotg *hsotg)
{
@@ -564,6 +566,9 @@ int dwc2_core_reset(struct dwc2_hsotg *hsotg, bool skip_wait)
* If a force is done, it requires a IDDIG debounce filter delay if
* the filter is configured and enabled. We poll the current mode of
* the controller to account for this delay.
*
* @hsotg: Programming view of DWC_otg controller
* @host: Host mode flag
*/
void dwc2_force_mode(struct dwc2_hsotg *hsotg, bool host)
{
@@ -610,6 +615,8 @@ void dwc2_force_mode(struct dwc2_hsotg *hsotg, bool host)
* or not because the value of the connector ID status is affected by
* the force mode. We only need to call this once during probe if
* dr_mode == OTG.
*
* @hsotg: Programming view of DWC_otg controller
*/
static void dwc2_clear_force_mode(struct dwc2_hsotg *hsotg)
{

drivers/usb/dwc2/core.h

@@ -164,12 +164,11 @@ struct dwc2_hsotg_req;
* and has yet to be completed (maybe due to data move, or simply
* awaiting an ack from the core all the data has been completed).
* @debugfs: File entry for debugfs file for this endpoint.
* @lock: State lock to protect contents of endpoint.
* @dir_in: Set to true if this endpoint is of the IN direction, which
* means that it is sending data to the Host.
* @index: The index for the endpoint registers.
* @mc: Multi Count - number of transactions per microframe
* @interval - Interval for periodic endpoints, in frames or microframes.
* @interval: Interval for periodic endpoints, in frames or microframes.
* @name: The name array passed to the USB core.
* @halted: Set if the endpoint has been halted.
* @periodic: Set if this is a periodic ep, such as Interrupt
@@ -178,10 +177,11 @@ struct dwc2_hsotg_req;
* @desc_list_dma: The DMA address of descriptor chain currently in use.
* @desc_list: Pointer to descriptor DMA chain head currently in use.
* @desc_count: Count of entries within the DMA descriptor chain of EP.
* @isoc_chain_num: Number of ISOC chain currently in use - either 0 or 1.
* @next_desc: index of next free descriptor in the ISOC chain under SW control.
* @compl_desc: index of next descriptor to be completed by xFerComplete
* @total_data: The total number of data bytes done.
* @fifo_size: The size of the FIFO (for periodic IN endpoints)
* @fifo_index: For Dedicated FIFO operation, only FIFO0 can be used for EP0.
* @fifo_load: The amount of data loaded into the FIFO (periodic IN)
* @last_load: The offset of data for the last start of request.
* @size_loaded: The last loaded size for DxEPTSIZE for periodic IN
@@ -231,8 +231,8 @@ struct dwc2_hsotg_ep {
struct dwc2_dma_desc *desc_list;
u8 desc_count;
unsigned char isoc_chain_num;
unsigned int next_desc;
unsigned int compl_desc;
char name[10];
};
@@ -380,6 +380,12 @@ enum dwc2_ep0_state {
* is FS.
* 0 - No (default)
* 1 - Yes
* @ipg_isoc_en: Indicates the IPG supports is enabled or disabled.
* 0 - Disable (default)
* 1 - Enable
* @acg_enable: For enabling Active Clock Gating in the controller
* 0 - No
* 1 - Yes
* @ulpi_fs_ls: Make ULPI phy operate in FS/LS mode only
* 0 - No (default)
* 1 - Yes
@@ -511,6 +517,7 @@ struct dwc2_core_params {
bool hird_threshold_en;
u8 hird_threshold;
bool activate_stm_fs_transceiver;
bool ipg_isoc_en;
u16 max_packet_count;
u32 max_transfer_size;
u32 ahbcfg;
@@ -548,7 +555,7 @@ struct dwc2_core_params {
*
* The values that are not in dwc2_core_params are documented below.
*
* @op_mode Mode of Operation
* @op_mode: Mode of Operation
* 0 - HNP- and SRP-Capable OTG (Host & Device)
* 1 - SRP-Capable OTG (Host & Device)
* 2 - Non-HNP and Non-SRP Capable OTG (Host & Device)
@@ -556,43 +563,102 @@ struct dwc2_core_params {
* 4 - Non-OTG Device
* 5 - SRP-Capable Host
* 6 - Non-OTG Host
* @arch Architecture
* @arch: Architecture
* 0 - Slave only
* 1 - External DMA
* 2 - Internal DMA
* @power_optimized Are power optimizations enabled?
* @num_dev_ep Number of device endpoints available
* @num_dev_in_eps Number of device IN endpoints available
* @num_dev_perio_in_ep Number of device periodic IN endpoints
* available
* @dev_token_q_depth Device Mode IN Token Sequence Learning Queue
* @ipg_isoc_en: This feature indicates that the controller supports
* the worst-case scenario of Rx followed by Rx
* Interpacket Gap (IPG) (32 bitTimes) as per the utmi
* specification for any token following ISOC OUT token.
* 0 - Don't support
* 1 - Support
* @power_optimized: Are power optimizations enabled?
* @num_dev_ep: Number of device endpoints available
* @num_dev_in_eps: Number of device IN endpoints available
* @num_dev_perio_in_ep: Number of device periodic IN endpoints
* available
* @dev_token_q_depth: Device Mode IN Token Sequence Learning Queue
* Depth
* 0 to 30
* @host_perio_tx_q_depth
* @host_perio_tx_q_depth:
* Host Mode Periodic Request Queue Depth
* 2, 4 or 8
* @nperio_tx_q_depth
* @nperio_tx_q_depth:
* Non-Periodic Request Queue Depth
* 2, 4 or 8
* @hs_phy_type High-speed PHY interface type
* @hs_phy_type: High-speed PHY interface type
* 0 - High-speed interface not supported
* 1 - UTMI+
* 2 - ULPI
* 3 - UTMI+ and ULPI
* @fs_phy_type Full-speed PHY interface type
* @fs_phy_type: Full-speed PHY interface type
* 0 - Full speed interface not supported
* 1 - Dedicated full speed interface
* 2 - FS pins shared with UTMI+ pins
* 3 - FS pins shared with ULPI pins
* @total_fifo_size: Total internal RAM for FIFOs (bytes)
* @hibernation Is hibernation enabled?
* @utmi_phy_data_width UTMI+ PHY data width
* @hibernation: Is hibernation enabled?
* @utmi_phy_data_width: UTMI+ PHY data width
* 0 - 8 bits
* 1 - 16 bits
* 2 - 8 or 16 bits
* @snpsid: Value from SNPSID register
* @dev_ep_dirs: Direction of device endpoints (GHWCFG1)
* @g_tx_fifo_size[] Power-on values of TxFIFO sizes
* @g_tx_fifo_size: Power-on values of TxFIFO sizes
* @dma_desc_enable: When DMA mode is enabled, specifies whether to use
* address DMA mode or descriptor DMA mode for accessing
* the data FIFOs. The driver will automatically detect the
* value for this if none is specified.
* 0 - Address DMA
* 1 - Descriptor DMA (default, if available)
* @enable_dynamic_fifo: 0 - Use coreConsultant-specified FIFO size parameters
* 1 - Allow dynamic FIFO sizing (default, if available)
* @en_multiple_tx_fifo: Specifies whether dedicated per-endpoint transmit FIFOs
* are enabled for non-periodic IN endpoints in device
* mode.
* @host_nperio_tx_fifo_size: Number of 4-byte words in the non-periodic Tx FIFO
* in host mode when dynamic FIFO sizing is enabled
* 16 to 32768
* Actual maximum value is autodetected and also
* the default.
* @host_perio_tx_fifo_size: Number of 4-byte words in the periodic Tx FIFO in
* host mode when dynamic FIFO sizing is enabled
* 16 to 32768
* Actual maximum value is autodetected and also
* the default.
* @max_transfer_size: The maximum transfer size supported, in bytes
* 2047 to 65,535
* Actual maximum value is autodetected and also
* the default.
* @max_packet_count: The maximum number of packets in a transfer
* 15 to 511
* Actual maximum value is autodetected and also
* the default.
* @host_channels: The number of host channel registers to use
* 1 to 16
* Actual maximum value is autodetected and also
* the default.
* @dev_nperio_tx_fifo_size: Number of 4-byte words in the non-periodic Tx FIFO
* in device mode when dynamic FIFO sizing is enabled
* 16 to 32768
* Actual maximum value is autodetected and also
* the default.
* @i2c_enable: Specifies whether to use the I2Cinterface for a full
* speed PHY. This parameter is only applicable if phy_type
* is FS.
* 0 - No (default)
* 1 - Yes
* @acg_enable: For enabling Active Clock Gating in the controller
* 0 - Disable
* 1 - Enable
* @lpm_mode: For enabling Link Power Management in the controller
* 0 - Disable
* 1 - Enable
* @rx_fifo_size: Number of 4-byte words in the Rx FIFO when dynamic
* FIFO sizing is enabled 16 to 32768
* Actual maximum value is autodetected and also
* the default.
*/
struct dwc2_hw_params {
unsigned op_mode:3;
@@ -622,6 +688,7 @@ struct dwc2_hw_params {
unsigned hibernation:1;
unsigned utmi_phy_data_width:2;
unsigned lpm_mode:1;
unsigned ipg_isoc_en:1;
u32 snpsid;
u32 dev_ep_dirs;
u32 g_tx_fifo_size[MAX_EPS_CHANNELS];
@@ -642,7 +709,11 @@ struct dwc2_hw_params {
* @gi2cctl: Backup of GI2CCTL register
* @glpmcfg: Backup of GLPMCFG register
* @gdfifocfg: Backup of GDFIFOCFG register
* @pcgcctl: Backup of PCGCCTL register
* @pcgcctl1: Backup of PCGCCTL1 register
* @dtxfsiz: Backup of DTXFSIZ registers for each endpoint
* @gpwrdn: Backup of GPWRDN register
* @valid: True if registers values backuped.
*/
struct dwc2_gregs_backup {
u32 gotgctl;
@@ -675,6 +746,7 @@ struct dwc2_gregs_backup {
* @doeptsiz: Backup of DOEPTSIZ register
* @doepdma: Backup of DOEPDMA register
* @dtxfsiz: Backup of DTXFSIZ registers for each endpoint
* @valid: True if registers values backuped.
*/
struct dwc2_dregs_backup {
u32 dcfg;
@@ -698,9 +770,10 @@ struct dwc2_dregs_backup {
* @hcfg: Backup of HCFG register
* @haintmsk: Backup of HAINTMSK register
* @hcintmsk: Backup of HCINTMSK register
* @hptr0: Backup of HPTR0 register
* @hprt0: Backup of HPTR0 register
* @hfir: Backup of HFIR register
* @hptxfsiz: Backup of HPTXFSIZ register
* @valid: True if registers values backuped.
*/
struct dwc2_hregs_backup {
u32 hcfg;
@@ -800,7 +873,7 @@ struct dwc2_hregs_backup {
* @regs: Pointer to controller regs
* @hw_params: Parameters that were autodetected from the
* hardware registers
* @core_params: Parameters that define how the core should be configured
* @params: Parameters that define how the core should be configured
* @op_state: The operational State, during transitions (a_host=>
* a_peripheral and b_device=>b_host) this may not match
* the core, but allows the software to determine
@@ -809,10 +882,13 @@ struct dwc2_hregs_backup {
* - USB_DR_MODE_PERIPHERAL
* - USB_DR_MODE_HOST
* - USB_DR_MODE_OTG
* @hcd_enabled Host mode sub-driver initialization indicator.
* @gadget_enabled Peripheral mode sub-driver initialization indicator.
* @ll_hw_enabled Status of low-level hardware resources.
* @hcd_enabled: Host mode sub-driver initialization indicator.
* @gadget_enabled: Peripheral mode sub-driver initialization indicator.
* @ll_hw_enabled: Status of low-level hardware resources.
* @hibernated: True if core is hibernated
* @frame_number: Frame number read from the core. For both device
* and host modes. The value ranges are from 0
* to HFNUM_MAX_FRNUM.
* @phy: The otg phy transceiver structure for phy control.
* @uphy: The otg phy transceiver structure for old USB phy
* control.
@@ -832,13 +908,25 @@ struct dwc2_hregs_backup {
* interrupt
* @wkp_timer: Timer object for handling Wakeup Detected interrupt
* @lx_state: Lx state of connected device
* @gregs_backup: Backup of global registers during suspend
* @dregs_backup: Backup of device registers during suspend
* @hregs_backup: Backup of host registers during suspend
* @gr_backup: Backup of global registers during suspend
* @dr_backup: Backup of device registers during suspend
* @hr_backup: Backup of host registers during suspend
*
* These are for host mode:
*
* @flags: Flags for handling root port state changes
* @flags.d32: Contain all root port flags
* @flags.b: Separate root port flags from each other
* @flags.b.port_connect_status_change: True if root port connect status
* changed
* @flags.b.port_connect_status: True if device connected to root port
* @flags.b.port_reset_change: True if root port reset status changed
* @flags.b.port_enable_change: True if root port enable status changed
* @flags.b.port_suspend_change: True if root port suspend status changed
* @flags.b.port_over_current_change: True if root port over current state
* changed.
* @flags.b.port_l1_change: True if root port l1 status changed
* @flags.b.reserved: Reserved bits of root port register
* @non_periodic_sched_inactive: Inactive QHs in the non-periodic schedule.
* Transfers associated with these QHs are not currently
* assigned to a host channel.
@@ -847,6 +935,9 @@ struct dwc2_hregs_backup {
* assigned to a host channel.
* @non_periodic_qh_ptr: Pointer to next QH to process in the active
* non-periodic schedule
* @non_periodic_sched_waiting: Waiting QHs in the non-periodic schedule.
* Transfers associated with these QHs are not currently
* assigned to a host channel.
* @periodic_sched_inactive: Inactive QHs in the periodic schedule. This is a
* list of QHs for periodic transfers that are _not_
* scheduled for the next frame. Each QH in the list has an
@@ -886,8 +977,6 @@ struct dwc2_hregs_backup {
* @hs_periodic_bitmap: Bitmap used by the microframe scheduler any time the
* host is in high speed mode; low speed schedules are
* stored elsewhere since we need one per TT.
* @frame_number: Frame number read from the core at SOF. The value ranges
* from 0 to HFNUM_MAX_FRNUM.
* @periodic_qh_count: Count of periodic QHs, if using several eps. Used for
* SOF enable/disable.
* @free_hc_list: Free host channels in the controller. This is a list of
@@ -898,8 +987,8 @@ struct dwc2_hregs_backup {
* host channel is available for non-periodic transactions.
* @non_periodic_channels: Number of host channels assigned to non-periodic
* transfers
* @available_host_channels Number of host channels available for the microframe
* scheduler to use
* @available_host_channels: Number of host channels available for the
* microframe scheduler to use
* @hc_ptr_array: Array of pointers to the host channel descriptors.
* Allows accessing a host channel descriptor given the
* host channel number. This is useful in interrupt
@@ -922,9 +1011,6 @@ struct dwc2_hregs_backup {
* @dedicated_fifos: Set if the hardware has dedicated IN-EP fifos.
* @num_of_eps: Number of available EPs (excluding EP0)
* @debug_root: Root directrory for debugfs.
* @debug_file: Main status file for debugfs.
* @debug_testmode: Testmode status file for debugfs.
* @debug_fifo: FIFO status file for debugfs.
* @ep0_reply: Request used for ep0 reply.
* @ep0_buff: Buffer for EP0 reply data, if needed.
* @ctrl_buff: Buffer for EP0 control requests.
@@ -939,7 +1025,37 @@ struct dwc2_hregs_backup {
* @ctrl_in_desc: EP0 IN data phase desc chain pointer
* @ctrl_out_desc_dma: EP0 OUT data phase desc chain DMA address
* @ctrl_out_desc: EP0 OUT data phase desc chain pointer
* @eps: The endpoints being supplied to the gadget framework
* @irq: Interrupt request line number
* @clk: Pointer to otg clock
* @reset: Pointer to dwc2 reset controller
* @reset_ecc: Pointer to dwc2 optional reset controller in Stratix10.
* @regset: A pointer to a struct debugfs_regset32, which contains
* a pointer to an array of register definitions, the
* array size and the base address where the register bank
* is to be found.
* @bus_suspended: True if bus is suspended
* @last_frame_num: Number of last frame. Range from 0 to 32768
* @frame_num_array: Used only if CONFIG_USB_DWC2_TRACK_MISSED_SOFS is
* defined, for missed SOFs tracking. Array holds that
* frame numbers, which not equal to last_frame_num +1
* @last_frame_num_array: Used only if CONFIG_USB_DWC2_TRACK_MISSED_SOFS is
* defined, for missed SOFs tracking.
* If current_frame_number != last_frame_num+1
* then last_frame_num added to this array
* @frame_num_idx: Actual size of frame_num_array and last_frame_num_array
* @dumped_frame_num_array: 1 - if missed SOFs frame numbers dumbed
* 0 - if missed SOFs frame numbers not dumbed
* @fifo_mem: Total internal RAM for FIFOs (bytes)
* @fifo_map: Each bit intend for concrete fifo. If that bit is set,
* then that fifo is used
* @gadget: Represents a usb slave device
* @connected: Used in slave mode. True if device connected with host
* @eps_in: The IN endpoints being supplied to the gadget framework
* @eps_out: The OUT endpoints being supplied to the gadget framework
* @new_connection: Used in host mode. True if there are new connected
* device
* @enabled: Indicates the enabling state of controller
*
*/
struct dwc2_hsotg {
struct device *dev;
@@ -954,6 +1070,7 @@ struct dwc2_hsotg {
unsigned int gadget_enabled:1;
unsigned int ll_hw_enabled:1;
unsigned int hibernated:1;
u16 frame_number;
struct phy *phy;
struct usb_phy *uphy;
@@ -1029,7 +1146,6 @@ struct dwc2_hsotg {
u16 periodic_usecs;
unsigned long hs_periodic_bitmap[
DIV_ROUND_UP(DWC2_HS_SCHEDULE_US, BITS_PER_LONG)];
u16 frame_number;
u16 periodic_qh_count;
bool bus_suspended;
bool new_connection;

drivers/usb/dwc2/core_intr.c

@@ -778,6 +778,14 @@ irqreturn_t dwc2_handle_common_intr(int irq, void *dev)
goto out;
}
/* Reading current frame number value in device or host modes. */
if (dwc2_is_device_mode(hsotg))
hsotg->frame_number = (dwc2_readl(hsotg->regs + DSTS)
& DSTS_SOFFN_MASK) >> DSTS_SOFFN_SHIFT;
else
hsotg->frame_number = (dwc2_readl(hsotg->regs + HFNUM)
& HFNUM_FRNUM_MASK) >> HFNUM_FRNUM_SHIFT;
gintsts = dwc2_read_common_intr(hsotg);
if (gintsts & ~GINTSTS_PRTINT)
retval = IRQ_HANDLED;

drivers/usb/dwc2/debug.h

@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
/**
/*
* debug.h - Designware USB2 DRD controller debug header
*
* Copyright (C) 2015 Intel Corporation

drivers/usb/dwc2/debugfs.c

@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
/**
/*
* debugfs.c - Designware USB2 DRD controller debugfs
*
* Copyright (C) 2015 Intel Corporation
@@ -16,12 +16,13 @@
#if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \
IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
/**
* testmode_write - debugfs: change usb test mode
* @seq: The seq file to write to.
* @v: Unused parameter.
*
* This debugfs entry modify the current usb test mode.
* testmode_write() - change usb test mode state.
* @file: The file to write to.
* @ubuf: The buffer where user wrote.
* @count: The ubuf size.
* @ppos: Unused parameter.
*/
static ssize_t testmode_write(struct file *file, const char __user *ubuf, size_t
count, loff_t *ppos)
@@ -55,9 +56,9 @@ static ssize_t testmode_write(struct file *file, const char __user *ubuf, size_t
}
/**
* testmode_show - debugfs: show usb test mode state
* @seq: The seq file to write to.
* @v: Unused parameter.
* testmode_show() - debugfs: show usb test mode state
* @s: The seq file to write to.
* @unused: Unused parameter.
*
* This debugfs entry shows which usb test mode is currently enabled.
*/
@@ -293,52 +294,30 @@ DEFINE_SHOW_ATTRIBUTE(ep);
static void dwc2_hsotg_create_debug(struct dwc2_hsotg *hsotg)
{
struct dentry *root;
struct dentry *file;
unsigned int epidx;
root = hsotg->debug_root;
/* create general state file */
file = debugfs_create_file("state", 0444, root, hsotg, &state_fops);
if (IS_ERR(file))
dev_err(hsotg->dev, "%s: failed to create state\n", __func__);
file = debugfs_create_file("testmode", 0644, root, hsotg,
&testmode_fops);
if (IS_ERR(file))
dev_err(hsotg->dev, "%s: failed to create testmode\n",
__func__);
file = debugfs_create_file("fifo", 0444, root, hsotg, &fifo_fops);
if (IS_ERR(file))
dev_err(hsotg->dev, "%s: failed to create fifo\n", __func__);
debugfs_create_file("state", 0444, root, hsotg, &state_fops);
debugfs_create_file("testmode", 0644, root, hsotg, &testmode_fops);
debugfs_create_file("fifo", 0444, root, hsotg, &fifo_fops);
/* Create one file for each out endpoint */
for (epidx = 0; epidx < hsotg->num_of_eps; epidx++) {
struct dwc2_hsotg_ep *ep;
ep = hsotg->eps_out[epidx];
if (ep) {
file = debugfs_create_file(ep->name, 0444,
root, ep, &ep_fops);
if (IS_ERR(file))
dev_err(hsotg->dev, "failed to create %s debug file\n",
ep->name);
}
if (ep)
debugfs_create_file(ep->name, 0444, root, ep, &ep_fops);
}
/* Create one file for each in endpoint. EP0 is handled with out eps */
for (epidx = 1; epidx < hsotg->num_of_eps; epidx++) {
struct dwc2_hsotg_ep *ep;
ep = hsotg->eps_in[epidx];
if (ep) {
file = debugfs_create_file(ep->name, 0444,
root, ep, &ep_fops);
if (IS_ERR(file))
dev_err(hsotg->dev, "failed to create %s debug file\n",
ep->name);
}
if (ep)
debugfs_create_file(ep->name, 0444, root, ep, &ep_fops);
}
}
#else
@@ -368,7 +347,7 @@ static const struct debugfs_reg32 dwc2_regs[] = {
dump_register(GINTSTS),
dump_register(GINTMSK),
dump_register(GRXSTSR),
dump_register(GRXSTSP),
/* Omit GRXSTSP */
dump_register(GRXFSIZ),
dump_register(GNPTXFSIZ),
dump_register(GNPTXSTS),
@@ -710,6 +689,7 @@ static int params_show(struct seq_file *seq, void *v)
print_param(seq, p, phy_ulpi_ddr);
print_param(seq, p, phy_ulpi_ext_vbus);
print_param(seq, p, i2c_enable);
print_param(seq, p, ipg_isoc_en);
print_param(seq, p, ulpi_fs_ls);
print_param(seq, p, host_support_fs_ls_low_power);
print_param(seq, p, host_ls_low_power_phy_clk);
@@ -790,32 +770,14 @@ DEFINE_SHOW_ATTRIBUTE(dr_mode);
int dwc2_debugfs_init(struct dwc2_hsotg *hsotg)
{
int ret;
struct dentry *file;
struct dentry *root;
hsotg->debug_root = debugfs_create_dir(dev_name(hsotg->dev), NULL);
if (!hsotg->debug_root) {
ret = -ENOMEM;
goto err0;
}
root = debugfs_create_dir(dev_name(hsotg->dev), NULL);
hsotg->debug_root = root;
file = debugfs_create_file("params", 0444,
hsotg->debug_root,
hsotg, &params_fops);
if (IS_ERR(file))
dev_err(hsotg->dev, "%s: failed to create params\n", __func__);
file = debugfs_create_file("hw_params", 0444,
hsotg->debug_root,
hsotg, &hw_params_fops);
if (IS_ERR(file))
dev_err(hsotg->dev, "%s: failed to create hw_params\n",
__func__);
file = debugfs_create_file("dr_mode", 0444,
hsotg->debug_root,
hsotg, &dr_mode_fops);
if (IS_ERR(file))
dev_err(hsotg->dev, "%s: failed to create dr_mode\n", __func__);
debugfs_create_file("params", 0444, root, hsotg, &params_fops);
debugfs_create_file("hw_params", 0444, root, hsotg, &hw_params_fops);
debugfs_create_file("dr_mode", 0444, root, hsotg, &dr_mode_fops);
/* Add gadget debugfs nodes */
dwc2_hsotg_create_debug(hsotg);
@@ -824,24 +786,18 @@ int dwc2_debugfs_init(struct dwc2_hsotg *hsotg)
GFP_KERNEL);
if (!hsotg->regset) {
ret = -ENOMEM;
goto err1;
goto err;
}
hsotg->regset->regs = dwc2_regs;
hsotg->regset->nregs = ARRAY_SIZE(dwc2_regs);
hsotg->regset->base = hsotg->regs;
file = debugfs_create_regset32("regdump", 0444, hsotg->debug_root,
hsotg->regset);
if (!file) {
ret = -ENOMEM;
goto err1;
}
debugfs_create_regset32("regdump", 0444, root, hsotg->regset);
return 0;
err1:
err:
debugfs_remove_recursive(hsotg->debug_root);
err0:
return ret;
}

drivers/usb/dwc2/gadget.c

@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
/**
/*
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
@@ -107,7 +107,6 @@ static inline bool using_desc_dma(struct dwc2_hsotg *hsotg)
/**
* dwc2_gadget_incr_frame_num - Increments the targeted frame number.
* @hs_ep: The endpoint
* @increment: The value to increment by
*
* This function will also check if the frame number overruns DSTS_SOFFN_LIMIT.
* If an overrun occurs it will wrap the value and set the frame_overrun flag.
@@ -190,6 +189,8 @@ static void dwc2_hsotg_ctrl_epint(struct dwc2_hsotg *hsotg,
/**
* dwc2_hsotg_tx_fifo_count - return count of TX FIFOs in device mode
*
* @hsotg: Programming view of the DWC_otg controller
*/
int dwc2_hsotg_tx_fifo_count(struct dwc2_hsotg *hsotg)
{
@@ -204,6 +205,8 @@ int dwc2_hsotg_tx_fifo_count(struct dwc2_hsotg *hsotg)
/**
* dwc2_hsotg_tx_fifo_total_depth - return total FIFO depth available for
* device mode TX FIFOs
*
* @hsotg: Programming view of the DWC_otg controller
*/
int dwc2_hsotg_tx_fifo_total_depth(struct dwc2_hsotg *hsotg)
{
@@ -227,6 +230,8 @@ int dwc2_hsotg_tx_fifo_total_depth(struct dwc2_hsotg *hsotg)
/**
* dwc2_hsotg_tx_fifo_average_depth - returns average depth of device mode
* TX FIFOs
*
* @hsotg: Programming view of the DWC_otg controller
*/
int dwc2_hsotg_tx_fifo_average_depth(struct dwc2_hsotg *hsotg)
{
@@ -327,6 +332,7 @@ static void dwc2_hsotg_init_fifo(struct dwc2_hsotg *hsotg)
}
/**
* dwc2_hsotg_ep_alloc_request - allocate USB rerequest structure
* @ep: USB endpoint to allocate request for.
* @flags: Allocation flags
*
@@ -793,9 +799,7 @@ static void dwc2_gadget_config_nonisoc_xfer_ddma(struct dwc2_hsotg_ep *hs_ep,
* @dma_buff: usb requests dma buffer.
* @len: usb request transfer length.
*
* Finds out index of first free entry either in the bottom or up half of
* descriptor chain depend on which is under SW control and not processed
* by HW. Then fills that descriptor with the data of the arrived usb request,
* Fills next free descriptor with the data of the arrived usb request,
* frame info, sets Last and IOC bits increments next_desc. If filled
* descriptor is not the first one, removes L bit from the previous descriptor
* status.
@@ -810,34 +814,17 @@ static int dwc2_gadget_fill_isoc_desc(struct dwc2_hsotg_ep *hs_ep,
u32 mask = 0;
maxsize = dwc2_gadget_get_desc_params(hs_ep, &mask);
if (len > maxsize) {
dev_err(hsotg->dev, "wrong len %d\n", len);
return -EINVAL;
}
/*
* If SW has already filled half of chain, then return and wait for
* the other chain to be processed by HW.
*/
if (hs_ep->next_desc == MAX_DMA_DESC_NUM_GENERIC / 2)
return -EBUSY;
/* Increment frame number by interval for IN */
if (hs_ep->dir_in)
dwc2_gadget_incr_frame_num(hs_ep);
index = (MAX_DMA_DESC_NUM_GENERIC / 2) * hs_ep->isoc_chain_num +
hs_ep->next_desc;
/* Sanity check of calculated index */
if ((hs_ep->isoc_chain_num && index > MAX_DMA_DESC_NUM_GENERIC) ||
(!hs_ep->isoc_chain_num && index > MAX_DMA_DESC_NUM_GENERIC / 2)) {
dev_err(hsotg->dev, "wrong index %d for iso chain\n", index);
return -EINVAL;
}
index = hs_ep->next_desc;
desc = &hs_ep->desc_list[index];
/* Check if descriptor chain full */
if ((desc->status >> DEV_DMA_BUFF_STS_SHIFT) ==
DEV_DMA_BUFF_STS_HREADY) {
dev_dbg(hsotg->dev, "%s: desc chain full\n", __func__);
return 1;
}
/* Clear L bit of previous desc if more than one entries in the chain */
if (hs_ep->next_desc)
hs_ep->desc_list[index - 1].status &= ~DEV_DMA_L;
@@ -865,8 +852,14 @@ static int dwc2_gadget_fill_isoc_desc(struct dwc2_hsotg_ep *hs_ep,
desc->status &= ~DEV_DMA_BUFF_STS_MASK;
desc->status |= (DEV_DMA_BUFF_STS_HREADY << DEV_DMA_BUFF_STS_SHIFT);
/* Increment frame number by interval for IN */
if (hs_ep->dir_in)
dwc2_gadget_incr_frame_num(hs_ep);
/* Update index of last configured entry in the chain */
hs_ep->next_desc++;
if (hs_ep->next_desc >= MAX_DMA_DESC_NUM_GENERIC)
hs_ep->next_desc = 0;
return 0;
}
@@ -875,11 +868,8 @@ static int dwc2_gadget_fill_isoc_desc(struct dwc2_hsotg_ep *hs_ep,
* dwc2_gadget_start_isoc_ddma - start isochronous transfer in DDMA
* @hs_ep: The isochronous endpoint.
*
* Prepare first descriptor chain for isochronous endpoints. Afterwards
* Prepare descriptor chain for isochronous endpoints. Afterwards
* write DMA address to HW and enable the endpoint.
*
* Switch between descriptor chains via isoc_chain_num to give SW opportunity
* to prepare second descriptor chain while first one is being processed by HW.
*/
static void dwc2_gadget_start_isoc_ddma(struct dwc2_hsotg_ep *hs_ep)
{
@@ -887,24 +877,34 @@ static void dwc2_gadget_start_isoc_ddma(struct dwc2_hsotg_ep *hs_ep)
struct dwc2_hsotg_req *hs_req, *treq;
int index = hs_ep->index;
int ret;
int i;
u32 dma_reg;
u32 depctl;
u32 ctrl;
struct dwc2_dma_desc *desc;
if (list_empty(&hs_ep->queue)) {
dev_dbg(hsotg->dev, "%s: No requests in queue\n", __func__);
return;
}
/* Initialize descriptor chain by Host Busy status */
for (i = 0; i < MAX_DMA_DESC_NUM_GENERIC; i++) {
desc = &hs_ep->desc_list[i];
desc->status = 0;
desc->status |= (DEV_DMA_BUFF_STS_HBUSY
<< DEV_DMA_BUFF_STS_SHIFT);
}
hs_ep->next_desc = 0;
list_for_each_entry_safe(hs_req, treq, &hs_ep->queue, queue) {
ret = dwc2_gadget_fill_isoc_desc(hs_ep, hs_req->req.dma,
hs_req->req.length);
if (ret) {
dev_dbg(hsotg->dev, "%s: desc chain full\n", __func__);
if (ret)
break;
}
}
hs_ep->compl_desc = 0;
depctl = hs_ep->dir_in ? DIEPCTL(index) : DOEPCTL(index);
dma_reg = hs_ep->dir_in ? DIEPDMA(index) : DOEPDMA(index);
@@ -914,10 +914,6 @@ static void dwc2_gadget_start_isoc_ddma(struct dwc2_hsotg_ep *hs_ep)
ctrl = dwc2_readl(hsotg->regs + depctl);
ctrl |= DXEPCTL_EPENA | DXEPCTL_CNAK;
dwc2_writel(ctrl, hsotg->regs + depctl);
/* Switch ISOC descriptor chain number being processed by SW*/
hs_ep->isoc_chain_num = (hs_ep->isoc_chain_num ^ 1) & 0x1;
hs_ep->next_desc = 0;
}
/**
@@ -1235,7 +1231,7 @@ static bool dwc2_gadget_target_frame_elapsed(struct dwc2_hsotg_ep *hs_ep)
{
struct dwc2_hsotg *hsotg = hs_ep->parent;
u32 target_frame = hs_ep->target_frame;
u32 current_frame = dwc2_hsotg_read_frameno(hsotg);
u32 current_frame = hsotg->frame_number;
bool frame_overrun = hs_ep->frame_overrun;
if (!frame_overrun && current_frame >= target_frame)
@@ -1291,6 +1287,9 @@ static int dwc2_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,
struct dwc2_hsotg *hs = hs_ep->parent;
bool first;
int ret;
u32 maxsize = 0;
u32 mask = 0;
dev_dbg(hs->dev, "%s: req %p: %d@%p, noi=%d, zero=%d, snok=%d\n",
ep->name, req, req->length, req->buf, req->no_interrupt,
@@ -1308,6 +1307,24 @@ static int dwc2_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,
req->actual = 0;
req->status = -EINPROGRESS;
/* In DDMA mode for ISOC's don't queue request if length greater
* than descriptor limits.
*/
if (using_desc_dma(hs) && hs_ep->isochronous) {
maxsize = dwc2_gadget_get_desc_params(hs_ep, &mask);
if (hs_ep->dir_in && req->length > maxsize) {
dev_err(hs->dev, "wrong length %d (maxsize=%d)\n",
req->length, maxsize);
return -EINVAL;
}
if (!hs_ep->dir_in && req->length > hs_ep->ep.maxpacket) {
dev_err(hs->dev, "ISOC OUT: wrong length %d (mps=%d)\n",
req->length, hs_ep->ep.maxpacket);
return -EINVAL;
}
}
ret = dwc2_hsotg_handle_unaligned_buf_start(hs, hs_ep, hs_req);
if (ret)
return ret;
@@ -1330,17 +1347,15 @@ static int dwc2_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,
/*
* Handle DDMA isochronous transfers separately - just add new entry
* to the half of descriptor chain that is not processed by HW.
* to the descriptor chain.
* Transfer will be started once SW gets either one of NAK or
* OutTknEpDis interrupts.
*/
if (using_desc_dma(hs) && hs_ep->isochronous &&
hs_ep->target_frame != TARGET_FRAME_INITIAL) {
ret = dwc2_gadget_fill_isoc_desc(hs_ep, hs_req->req.dma,
hs_req->req.length);
if (ret)
dev_dbg(hs->dev, "%s: ISO desc chain full\n", __func__);
if (using_desc_dma(hs) && hs_ep->isochronous) {
if (hs_ep->target_frame != TARGET_FRAME_INITIAL) {
dwc2_gadget_fill_isoc_desc(hs_ep, hs_req->req.dma,
hs_req->req.length);
}
return 0;
}
@@ -1350,8 +1365,15 @@ static int dwc2_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,
return 0;
}
while (dwc2_gadget_target_frame_elapsed(hs_ep))
/* Update current frame number value. */
hs->frame_number = dwc2_hsotg_read_frameno(hs);
while (dwc2_gadget_target_frame_elapsed(hs_ep)) {
dwc2_gadget_incr_frame_num(hs_ep);
/* Update current frame number value once more as it
* changes here.
*/
hs->frame_number = dwc2_hsotg_read_frameno(hs);
}
if (hs_ep->target_frame != TARGET_FRAME_INITIAL)
dwc2_hsotg_start_req(hs, hs_ep, hs_req, false);
@@ -2011,108 +2033,75 @@ static void dwc2_hsotg_complete_request(struct dwc2_hsotg *hsotg,
* @hs_ep: The endpoint the request was on.
*
* Get first request from the ep queue, determine descriptor on which complete
* happened. SW based on isoc_chain_num discovers which half of the descriptor
* chain is currently in use by HW, adjusts dma_address and calculates index
* of completed descriptor based on the value of DEPDMA register. Update actual
* length of request, giveback to gadget.
* happened. SW discovers which descriptor currently in use by HW, adjusts
* dma_address and calculates index of completed descriptor based on the value
* of DEPDMA register. Update actual length of request, giveback to gadget.
*/
static void dwc2_gadget_complete_isoc_request_ddma(struct dwc2_hsotg_ep *hs_ep)
{
struct dwc2_hsotg *hsotg = hs_ep->parent;
struct dwc2_hsotg_req *hs_req;
struct usb_request *ureq;
int index;
dma_addr_t dma_addr;
u32 dma_reg;
u32 depdma;
u32 desc_sts;
u32 mask;
hs_req = get_ep_head(hs_ep);
if (!hs_req) {
dev_warn(hsotg->dev, "%s: ISOC EP queue empty\n", __func__);
return;
desc_sts = hs_ep->desc_list[hs_ep->compl_desc].status;
/* Process only descriptors with buffer status set to DMA done */
while ((desc_sts & DEV_DMA_BUFF_STS_MASK) >>
DEV_DMA_BUFF_STS_SHIFT == DEV_DMA_BUFF_STS_DMADONE) {
hs_req = get_ep_head(hs_ep);
if (!hs_req) {
dev_warn(hsotg->dev, "%s: ISOC EP queue empty\n", __func__);
return;
}
ureq = &hs_req->req;
/* Check completion status */
if ((desc_sts & DEV_DMA_STS_MASK) >> DEV_DMA_STS_SHIFT ==
DEV_DMA_STS_SUCC) {
mask = hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_MASK :
DEV_DMA_ISOC_RX_NBYTES_MASK;
ureq->actual = ureq->length - ((desc_sts & mask) >>
DEV_DMA_ISOC_NBYTES_SHIFT);
/* Adjust actual len for ISOC Out if len is
* not align of 4
*/
if (!hs_ep->dir_in && ureq->length & 0x3)
ureq->actual += 4 - (ureq->length & 0x3);
}
dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
hs_ep->compl_desc++;
if (hs_ep->compl_desc > (MAX_DMA_DESC_NUM_GENERIC - 1))
hs_ep->compl_desc = 0;
desc_sts = hs_ep->desc_list[hs_ep->compl_desc].status;
}
ureq = &hs_req->req;
dma_addr = hs_ep->desc_list_dma;
/*
* If lower half of descriptor chain is currently use by SW,
* that means higher half is being processed by HW, so shift
* DMA address to higher half of descriptor chain.
*/
if (!hs_ep->isoc_chain_num)
dma_addr += sizeof(struct dwc2_dma_desc) *
(MAX_DMA_DESC_NUM_GENERIC / 2);
dma_reg = hs_ep->dir_in ? DIEPDMA(hs_ep->index) : DOEPDMA(hs_ep->index);
depdma = dwc2_readl(hsotg->regs + dma_reg);
index = (depdma - dma_addr) / sizeof(struct dwc2_dma_desc) - 1;
desc_sts = hs_ep->desc_list[index].status;
mask = hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_MASK :
DEV_DMA_ISOC_RX_NBYTES_MASK;
ureq->actual = ureq->length -
((desc_sts & mask) >> DEV_DMA_ISOC_NBYTES_SHIFT);
/* Adjust actual length for ISOC Out if length is not align of 4 */
if (!hs_ep->dir_in && ureq->length & 0x3)
ureq->actual += 4 - (ureq->length & 0x3);
dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
}
/*
* dwc2_gadget_start_next_isoc_ddma - start next isoc request, if any.
* @hs_ep: The isochronous endpoint to be re-enabled.
* dwc2_gadget_handle_isoc_bna - handle BNA interrupt for ISOC.
* @hs_ep: The isochronous endpoint.
*
* If ep has been disabled due to last descriptor servicing (IN endpoint) or
* BNA (OUT endpoint) check the status of other half of descriptor chain that
* was under SW control till HW was busy and restart the endpoint if needed.
* If EP ISOC OUT then need to flush RX FIFO to remove source of BNA
* interrupt. Reset target frame and next_desc to allow to start
* ISOC's on NAK interrupt for IN direction or on OUTTKNEPDIS
* interrupt for OUT direction.
*/
static void dwc2_gadget_start_next_isoc_ddma(struct dwc2_hsotg_ep *hs_ep)
static void dwc2_gadget_handle_isoc_bna(struct dwc2_hsotg_ep *hs_ep)
{
struct dwc2_hsotg *hsotg = hs_ep->parent;
u32 depctl;
u32 dma_reg;
u32 ctrl;
u32 dma_addr = hs_ep->desc_list_dma;
unsigned char index = hs_ep->index;
dma_reg = hs_ep->dir_in ? DIEPDMA(index) : DOEPDMA(index);
depctl = hs_ep->dir_in ? DIEPCTL(index) : DOEPCTL(index);
if (!hs_ep->dir_in)
dwc2_flush_rx_fifo(hsotg);
dwc2_hsotg_complete_request(hsotg, hs_ep, get_ep_head(hs_ep), 0);
ctrl = dwc2_readl(hsotg->regs + depctl);
/*
* EP was disabled if HW has processed last descriptor or BNA was set.
* So restart ep if SW has prepared new descriptor chain in ep_queue
* routine while HW was busy.
*/
if (!(ctrl & DXEPCTL_EPENA)) {
if (!hs_ep->next_desc) {
dev_dbg(hsotg->dev, "%s: No more ISOC requests\n",
__func__);
return;
}
dma_addr += sizeof(struct dwc2_dma_desc) *
(MAX_DMA_DESC_NUM_GENERIC / 2) *
hs_ep->isoc_chain_num;
dwc2_writel(dma_addr, hsotg->regs + dma_reg);
ctrl |= DXEPCTL_EPENA | DXEPCTL_CNAK;
dwc2_writel(ctrl, hsotg->regs + depctl);
/* Switch ISOC descriptor chain number being processed by SW*/
hs_ep->isoc_chain_num = (hs_ep->isoc_chain_num ^ 1) & 0x1;
hs_ep->next_desc = 0;
dev_dbg(hsotg->dev, "%s: Restarted isochronous endpoint\n",
__func__);
}
hs_ep->target_frame = TARGET_FRAME_INITIAL;
hs_ep->next_desc = 0;
hs_ep->compl_desc = 0;
}
/**
@@ -2441,6 +2430,7 @@ static u32 dwc2_hsotg_ep0_mps(unsigned int mps)
* @ep: The index number of the endpoint
* @mps: The maximum packet size in bytes
* @mc: The multicount value
* @dir_in: True if direction is in.
*
* Configure the maximum packet size for the given endpoint, updating
* the hardware control registers to reflect this.
@@ -2731,6 +2721,8 @@ static void dwc2_gadget_handle_ep_disabled(struct dwc2_hsotg_ep *hs_ep)
dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req,
-ENODATA);
dwc2_gadget_incr_frame_num(hs_ep);
/* Update current frame number value. */
hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg);
} while (dwc2_gadget_target_frame_elapsed(hs_ep));
dwc2_gadget_start_next_request(hs_ep);
@@ -2738,7 +2730,7 @@ static void dwc2_gadget_handle_ep_disabled(struct dwc2_hsotg_ep *hs_ep)
/**
* dwc2_gadget_handle_out_token_ep_disabled - handle DXEPINT_OUTTKNEPDIS
* @hs_ep: The endpoint on which interrupt is asserted.
* @ep: The endpoint on which interrupt is asserted.
*
* This is starting point for ISOC-OUT transfer, synchronization done with
* first out token received from host while corresponding EP is disabled.
@@ -2763,7 +2755,7 @@ static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep)
*/
tmp = dwc2_hsotg_read_frameno(hsotg);
dwc2_hsotg_complete_request(hsotg, ep, get_ep_head(ep), -ENODATA);
dwc2_hsotg_complete_request(hsotg, ep, get_ep_head(ep), 0);
if (using_desc_dma(hsotg)) {
if (ep->target_frame == TARGET_FRAME_INITIAL) {
@@ -2816,18 +2808,25 @@ static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep)
{
struct dwc2_hsotg *hsotg = hs_ep->parent;
int dir_in = hs_ep->dir_in;
u32 tmp;
if (!dir_in || !hs_ep->isochronous)
return;
if (hs_ep->target_frame == TARGET_FRAME_INITIAL) {
hs_ep->target_frame = dwc2_hsotg_read_frameno(hsotg);
tmp = dwc2_hsotg_read_frameno(hsotg);
if (using_desc_dma(hsotg)) {
dwc2_hsotg_complete_request(hsotg, hs_ep,
get_ep_head(hs_ep), 0);
hs_ep->target_frame = tmp;
dwc2_gadget_incr_frame_num(hs_ep);
dwc2_gadget_start_isoc_ddma(hs_ep);
return;
}
hs_ep->target_frame = tmp;
if (hs_ep->interval > 1) {
u32 ctrl = dwc2_readl(hsotg->regs +
DIEPCTL(hs_ep->index));
@@ -2843,7 +2842,8 @@ static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep)
get_ep_head(hs_ep), 0);
}
dwc2_gadget_incr_frame_num(hs_ep);
if (!using_desc_dma(hsotg))
dwc2_gadget_incr_frame_num(hs_ep);
}
/**
@@ -2901,9 +2901,9 @@ static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
/* In DDMA handle isochronous requests separately */
if (using_desc_dma(hsotg) && hs_ep->isochronous) {
dwc2_gadget_complete_isoc_request_ddma(hs_ep);
/* Try to start next isoc request */
dwc2_gadget_start_next_isoc_ddma(hs_ep);
/* XferCompl set along with BNA */
if (!(ints & DXEPINT_BNAINTR))
dwc2_gadget_complete_isoc_request_ddma(hs_ep);
} else if (dir_in) {
/*
* We get OutDone from the FIFO, so we only
@@ -2978,15 +2978,8 @@ static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
if (ints & DXEPINT_BNAINTR) {
dev_dbg(hsotg->dev, "%s: BNA interrupt\n", __func__);
/*
* Try to start next isoc request, if any.
* Sometimes the endpoint remains enabled after BNA interrupt
* assertion, which is not expected, hence we can enter here
* couple of times.
*/
if (hs_ep->isochronous)
dwc2_gadget_start_next_isoc_ddma(hs_ep);
dwc2_gadget_handle_isoc_bna(hs_ep);
}
if (dir_in && !hs_ep->isochronous) {
@@ -3197,6 +3190,7 @@ static void dwc2_hsotg_irq_fifoempty(struct dwc2_hsotg *hsotg, bool periodic)
/**
* dwc2_hsotg_core_init - issue softreset to the core
* @hsotg: The device state
* @is_usb_reset: Usb resetting flag
*
* Issue a soft reset to the core, and await the core finishing it.
*/
@@ -3259,6 +3253,9 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
dcfg |= DCFG_DEVSPD_HS;
}
if (hsotg->params.ipg_isoc_en)
dcfg |= DCFG_IPG_ISOC_SUPPORDED;
dwc2_writel(dcfg, hsotg->regs + DCFG);
/* Clear any pending OTG interrupts */
@@ -3320,8 +3317,10 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
hsotg->regs + DOEPMSK);
/* Enable BNA interrupt for DDMA */
if (using_desc_dma(hsotg))
if (using_desc_dma(hsotg)) {
dwc2_set_bit(hsotg->regs + DOEPMSK, DOEPMSK_BNAMSK);
dwc2_set_bit(hsotg->regs + DIEPMSK, DIEPMSK_BNAININTRMSK);
}
dwc2_writel(0, hsotg->regs + DAINTMSK);
@@ -3427,7 +3426,7 @@ static void dwc2_gadget_handle_incomplete_isoc_in(struct dwc2_hsotg *hsotg)
daintmsk = dwc2_readl(hsotg->regs + DAINTMSK);
for (idx = 1; idx <= hsotg->num_of_eps; idx++) {
for (idx = 1; idx < hsotg->num_of_eps; idx++) {
hs_ep = hsotg->eps_in[idx];
/* Proceed only unmasked ISOC EPs */
if (!hs_ep->isochronous || (BIT(idx) & ~daintmsk))
@@ -3473,7 +3472,7 @@ static void dwc2_gadget_handle_incomplete_isoc_out(struct dwc2_hsotg *hsotg)
daintmsk = dwc2_readl(hsotg->regs + DAINTMSK);
daintmsk >>= DAINT_OUTEP_SHIFT;
for (idx = 1; idx <= hsotg->num_of_eps; idx++) {
for (idx = 1; idx < hsotg->num_of_eps; idx++) {
hs_ep = hsotg->eps_out[idx];
/* Proceed only unmasked ISOC EPs */
if (!hs_ep->isochronous || (BIT(idx) & ~daintmsk))
@@ -3647,7 +3646,7 @@ irq_retry:
dwc2_writel(gintmsk, hsotg->regs + GINTMSK);
dev_dbg(hsotg->dev, "GOUTNakEff triggered\n");
for (idx = 1; idx <= hsotg->num_of_eps; idx++) {
for (idx = 1; idx < hsotg->num_of_eps; idx++) {
hs_ep = hsotg->eps_out[idx];
/* Proceed only unmasked ISOC EPs */
if (!hs_ep->isochronous || (BIT(idx) & ~daintmsk))
@@ -3789,6 +3788,7 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
unsigned int dir_in;
unsigned int i, val, size;
int ret = 0;
unsigned char ep_type;
dev_dbg(hsotg->dev,
"%s: ep %s: a 0x%02x, attr 0x%02x, mps 0x%04x, intr %d\n",
@@ -3807,9 +3807,26 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
return -EINVAL;
}
ep_type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
mps = usb_endpoint_maxp(desc);
mc = usb_endpoint_maxp_mult(desc);
/* ISOC IN in DDMA supports bInterval up to 10 */
if (using_desc_dma(hsotg) && ep_type == USB_ENDPOINT_XFER_ISOC &&
dir_in && desc->bInterval > 10) {
dev_err(hsotg->dev,
"%s: ISOC IN, DDMA: bInterval>10 not supported!\n", __func__);
return -EINVAL;
}
/* High bandwidth ISOC OUT in DDMA not supported */
if (using_desc_dma(hsotg) && ep_type == USB_ENDPOINT_XFER_ISOC &&
!dir_in && mc > 1) {
dev_err(hsotg->dev,
"%s: ISOC OUT, DDMA: HB not supported!\n", __func__);
return -EINVAL;
}
/* note, we handle this here instead of dwc2_hsotg_set_ep_maxpacket */
epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
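Aside, not part of the diff: the mc > 1 check in the hunk above hinges on how USB 2.0 packs the high-bandwidth multiplier into wMaxPacketSize; usb_endpoint_maxp() returns bits 10:0 and usb_endpoint_maxp_mult() returns bits 12:11 plus one. A minimal standalone decode, where the 0x1400 value is just an example descriptor word:
#include <stdio.h>

int main(void)
{
	unsigned int wMaxPacketSize = 0x1400;	/* example HS ISOC endpoint */
	unsigned int mps = wMaxPacketSize & 0x7ff;           /* bits 10:0      */
	unsigned int mc  = ((wMaxPacketSize >> 11) & 3) + 1; /* bits 12:11 + 1 */

	/* mps = 1024, mc = 3: up to three packets per microframe, i.e. the
	 * high-bandwidth case that the DDMA ISOC OUT path above rejects. */
	printf("mps=%u mc=%u high-bandwidth=%s\n",
	       mps, mc, mc > 1 ? "yes" : "no");
	return 0;
}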
@@ -3850,15 +3867,15 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
hs_ep->halted = 0;
hs_ep->interval = desc->bInterval;
switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
switch (ep_type) {
case USB_ENDPOINT_XFER_ISOC:
epctrl |= DXEPCTL_EPTYPE_ISO;
epctrl |= DXEPCTL_SETEVENFR;
hs_ep->isochronous = 1;
hs_ep->interval = 1 << (desc->bInterval - 1);
hs_ep->target_frame = TARGET_FRAME_INITIAL;
hs_ep->isoc_chain_num = 0;
hs_ep->next_desc = 0;
hs_ep->compl_desc = 0;
if (dir_in) {
hs_ep->periodic = 1;
mask = dwc2_readl(hsotg->regs + DIEPMSK);
@@ -4301,7 +4318,6 @@ err:
/**
* dwc2_hsotg_udc_stop - stop the udc
* @gadget: The usb gadget state
* @driver: The usb gadget driver
*
* Stop udc hw block and stay tuned for future transmissions
*/
@@ -4453,6 +4469,7 @@ static const struct usb_gadget_ops dwc2_hsotg_gadget_ops = {
* @hsotg: The device state.
* @hs_ep: The endpoint to be initialised.
* @epnum: The endpoint number
* @dir_in: True if direction is in.
*
* Initialise the given endpoint (as part of the probe and device state
* creation) to give to the gadget driver. Setup the endpoint name, any
@@ -4526,7 +4543,7 @@ static void dwc2_hsotg_initep(struct dwc2_hsotg *hsotg,
/**
* dwc2_hsotg_hw_cfg - read HW configuration registers
* @param: The device state
* @hsotg: Programming view of the DWC_otg controller
*
* Read the USB core HW configuration registers
*/
@@ -4582,7 +4599,8 @@ static int dwc2_hsotg_hw_cfg(struct dwc2_hsotg *hsotg)
/**
* dwc2_hsotg_dump - dump state of the udc
* @param: The device state
* @hsotg: Programming view of the DWC_otg controller
*
*/
static void dwc2_hsotg_dump(struct dwc2_hsotg *hsotg)
{
@@ -4633,7 +4651,8 @@ static void dwc2_hsotg_dump(struct dwc2_hsotg *hsotg)
/**
* dwc2_gadget_init - init function for gadget
* @dwc2: The data structure for the DWC2 driver.
* @hsotg: Programming view of the DWC_otg controller
*
*/
int dwc2_gadget_init(struct dwc2_hsotg *hsotg)
{
@@ -4730,7 +4749,8 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg)
/**
* dwc2_hsotg_remove - remove function for hsotg driver
* @pdev: The platform information for the driver
* @hsotg: Programming view of the DWC_otg controller
*
*/
int dwc2_hsotg_remove(struct dwc2_hsotg *hsotg)
{
@@ -5011,7 +5031,7 @@ int dwc2_gadget_enter_hibernation(struct dwc2_hsotg *hsotg)
*
* @hsotg: Programming view of the DWC_otg controller
* @rem_wakeup: indicates whether resume is initiated by Device or Host.
* @param reset: indicates whether resume is initiated by Reset.
* @reset: indicates whether resume is initiated by Reset.
*
* Return non-zero if failed to exit from hibernation.
*/


@@ -597,7 +597,7 @@ u32 dwc2_calc_frame_interval(struct dwc2_hsotg *hsotg)
* dwc2_read_packet() - Reads a packet from the Rx FIFO into the destination
* buffer
*
* @core_if: Programming view of DWC_otg controller
* @hsotg: Programming view of DWC_otg controller
* @dest: Destination buffer for the packet
* @bytes: Number of bytes to copy to the destination
*/
@@ -4087,7 +4087,6 @@ static struct dwc2_hsotg *dwc2_hcd_to_hsotg(struct usb_hcd *hcd)
* then the refcount for the structure will go to 0 and we'll free it.
*
* @hsotg: The HCD state structure for the DWC OTG controller.
* @qh: The QH structure.
* @context: The priv pointer from a struct dwc2_hcd_urb.
* @mem_flags: Flags for allocating memory.
* @ttport: We'll return this device's port number here. That's used to


@@ -80,7 +80,7 @@ struct dwc2_qh;
* @xfer_count: Number of bytes transferred so far
* @start_pkt_count: Packet count at start of transfer
* @xfer_started: True if the transfer has been started
* @ping: True if a PING request should be issued on this channel
* @do_ping: True if a PING request should be issued on this channel
* @error_state: True if the error count for this transaction is non-zero
* @halt_on_queue: True if this channel should be halted the next time a
* request is queued for the channel. This is necessary in
@@ -102,7 +102,7 @@ struct dwc2_qh;
* @schinfo: Scheduling micro-frame bitmap
* @ntd: Number of transfer descriptors for the transfer
* @halt_status: Reason for halting the host channel
* @hcint Contents of the HCINT register when the interrupt came
* @hcint: Contents of the HCINT register when the interrupt came
* @qh: QH for the transfer being processed by this channel
* @hc_list_entry: For linking to list of host channels
* @desc_list_addr: Current QH's descriptor list DMA address
@@ -237,7 +237,7 @@ struct dwc2_tt {
/**
* struct dwc2_hs_transfer_time - Info about a transfer on the high speed bus.
*
* @start_schedule_usecs: The start time on the main bus schedule. Note that
* @start_schedule_us: The start time on the main bus schedule. Note that
* the main bus schedule is tightly packed and this
* time should be interpreted as tightly packed (so
* uFrame 0 starts at 0 us, uFrame 1 starts at 100 us
@@ -301,7 +301,6 @@ struct dwc2_hs_transfer_time {
* "struct dwc2_tt". Not used if this device is high
* speed. Note that this is in "schedule slice" which
* is tightly packed.
* @ls_duration_us: Duration on the low speed bus schedule.
* @ntd: Actual number of transfer descriptors in a list
* @qtd_list: List of QTDs for this QH
* @channel: Host channel currently processing transfers for this QH
@@ -315,7 +314,7 @@ struct dwc2_hs_transfer_time {
* descriptor
* @unreserve_timer: Timer for releasing periodic reservation.
* @wait_timer: Timer used to wait before re-queuing.
* @dwc2_tt: Pointer to our tt info (or NULL if no tt).
* @dwc_tt: Pointer to our tt info (or NULL if no tt).
* @ttport: Port number within our tt.
* @tt_buffer_dirty True if clear_tt_buffer_complete is pending
* @unreserve_pending: True if we planned to unreserve but haven't yet.
@@ -325,6 +324,7 @@ struct dwc2_hs_transfer_time {
* periodic transfers and is ignored for periodic ones.
* @wait_timer_cancel: Set to true to cancel the wait_timer.
*
* @tt_buffer_dirty: True if EP's TT buffer is not clean.
* A Queue Head (QH) holds the static characteristics of an endpoint and
* maintains a list of transfers (QTDs) for that endpoint. A QH structure may
* be entered in either the non-periodic or periodic schedule.
@@ -400,6 +400,10 @@ struct dwc2_qh {
* @urb: URB for this transfer
* @qh: Queue head for this QTD
* @qtd_list_entry: For linking to the QH's list of QTDs
* @isoc_td_first: Index of first activated isochronous transfer
* descriptor in Descriptor DMA mode
* @isoc_td_last: Index of last activated isochronous transfer
* descriptor in Descriptor DMA mode
*
* A Queue Transfer Descriptor (QTD) holds the state of a bulk, control,
* interrupt, or isochronous transfer. A single QTD is created for each URB


@@ -332,6 +332,7 @@ static void dwc2_release_channel_ddma(struct dwc2_hsotg *hsotg,
*
* @hsotg: The HCD state structure for the DWC OTG controller
* @qh: The QH to init
* @mem_flags: Indicates the type of memory allocation
*
* Return: 0 if successful, negative error code otherwise
*


@@ -478,6 +478,12 @@ static u32 dwc2_get_actual_xfer_length(struct dwc2_hsotg *hsotg,
* of the URB based on the number of bytes transferred via the host channel.
* Sets the URB status if the data transfer is finished.
*
* @hsotg: Programming view of the DWC_otg controller
* @chan: Programming view of host channel
* @chnum: Channel number
* @urb: Processing URB
* @qtd: Queue transfer descriptor
*
* Return: 1 if the data transfer specified by the URB is completely finished,
* 0 otherwise
*/
@@ -566,6 +572,12 @@ void dwc2_hcd_save_data_toggle(struct dwc2_hsotg *hsotg,
* halt_status. Completes the Isochronous URB if all the URB frames have been
* completed.
*
* @hsotg: Programming view of the DWC_otg controller
* @chan: Programming view of host channel
* @chnum: Channel number
* @halt_status: Reason for halting a host channel
* @qtd: Queue transfer descriptor
*
* Return: DWC2_HC_XFER_COMPLETE if there are more frames remaining to be
* transferred in the URB. Otherwise return DWC2_HC_XFER_URB_COMPLETE.
*/


@@ -679,6 +679,7 @@ static int dwc2_hs_pmap_schedule(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
*
* @hsotg: The HCD state structure for the DWC OTG controller.
* @qh: QH for the periodic transfer.
* @index: Transfer index
*/
static void dwc2_hs_pmap_unschedule(struct dwc2_hsotg *hsotg,
struct dwc2_qh *qh, int index)
@@ -1276,7 +1277,7 @@ static void dwc2_do_unreserve(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
* release the reservation. This worker is called after the appropriate
* delay.
*
* @work: Pointer to a qh unreserve_work.
* @t: Address to a qh unreserve_work.
*/
static void dwc2_unreserve_timer_fn(struct timer_list *t)
{
@@ -1631,7 +1632,7 @@ static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
* @hsotg: The HCD state structure for the DWC OTG controller
* @urb: Holds the information about the device/endpoint needed
* to initialize the QH
* @atomic_alloc: Flag to do atomic allocation if needed
* @mem_flags: Flags for allocating memory.
*
* Return: Pointer to the newly allocated QH, or NULL on error
*/


@@ -311,6 +311,7 @@
#define GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK (0x3 << 14)
#define GHWCFG4_UTMI_PHY_DATA_WIDTH_SHIFT 14
#define GHWCFG4_ACG_SUPPORTED BIT(12)
#define GHWCFG4_IPG_ISOC_SUPPORTED BIT(11)
#define GHWCFG4_UTMI_PHY_DATA_WIDTH_8 0
#define GHWCFG4_UTMI_PHY_DATA_WIDTH_16 1
#define GHWCFG4_UTMI_PHY_DATA_WIDTH_8_OR_16 2
@@ -424,6 +425,7 @@
#define DCFG_EPMISCNT_SHIFT 18
#define DCFG_EPMISCNT_LIMIT 0x1f
#define DCFG_EPMISCNT(_x) ((_x) << 18)
#define DCFG_IPG_ISOC_SUPPORDED BIT(17)
#define DCFG_PERFRINT_MASK (0x3 << 11)
#define DCFG_PERFRINT_SHIFT 11
#define DCFG_PERFRINT_LIMIT 0x3


@@ -70,6 +70,7 @@ static void dwc2_set_his_params(struct dwc2_hsotg *hsotg)
GAHBCFG_HBSTLEN_SHIFT;
p->uframe_sched = false;
p->change_speed_quirk = true;
p->power_down = false;
}
static void dwc2_set_rk_params(struct dwc2_hsotg *hsotg)
@@ -269,6 +270,9 @@ static void dwc2_set_param_power_down(struct dwc2_hsotg *hsotg)
/**
* dwc2_set_default_params() - Set all core parameters to their
* auto-detected default values.
*
* @hsotg: Programming view of the DWC_otg controller
*
*/
static void dwc2_set_default_params(struct dwc2_hsotg *hsotg)
{
@@ -298,6 +302,7 @@ static void dwc2_set_default_params(struct dwc2_hsotg *hsotg)
p->besl = true;
p->hird_threshold_en = true;
p->hird_threshold = 4;
p->ipg_isoc_en = false;
p->max_packet_count = hw->max_packet_count;
p->max_transfer_size = hw->max_transfer_size;
p->ahbcfg = GAHBCFG_HBSTLEN_INCR << GAHBCFG_HBSTLEN_SHIFT;
@@ -338,6 +343,8 @@ static void dwc2_set_default_params(struct dwc2_hsotg *hsotg)
/**
* dwc2_get_device_properties() - Read in device properties.
*
* @hsotg: Programming view of the DWC_otg controller
*
* Read in the device properties and adjust core parameters if needed.
*/
static void dwc2_get_device_properties(struct dwc2_hsotg *hsotg)
@@ -549,7 +556,7 @@ static void dwc2_check_param_tx_fifo_sizes(struct dwc2_hsotg *hsotg)
}
#define CHECK_RANGE(_param, _min, _max, _def) do { \
if ((hsotg->params._param) < (_min) || \
if ((int)(hsotg->params._param) < (_min) || \
(hsotg->params._param) > (_max)) { \
dev_warn(hsotg->dev, "%s: Invalid parameter %s=%d\n", \
__func__, #_param, hsotg->params._param); \
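Aside, not part of the diff: the new (int) cast presumably guards against the usual signed/unsigned comparison trap when the parameter field is an unsigned type and _min is negative (for instance -1 used as an auto/default marker). A standalone illustration:
#include <stdio.h>

int main(void)
{
	unsigned int param = 5;	/* stand-in for an unsigned params field */
	int min = -1;		/* negative lower bound */

	/* Unsigned compare: min converts to 0xffffffff, so 5 < min is "true"
	 * and a range check written this way fires spuriously. */
	printf("unsigned compare: %d\n", param < min);      /* prints 1 */

	/* With the cast the comparison stays signed, as intended. */
	printf("signed compare:   %d\n", (int)param < min); /* prints 0 */
	return 0;
}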
@@ -579,6 +586,7 @@ static void dwc2_check_params(struct dwc2_hsotg *hsotg)
CHECK_BOOL(enable_dynamic_fifo, hw->enable_dynamic_fifo);
CHECK_BOOL(en_multiple_tx_fifo, hw->en_multiple_tx_fifo);
CHECK_BOOL(i2c_enable, hw->i2c_enable);
CHECK_BOOL(ipg_isoc_en, hw->ipg_isoc_en);
CHECK_BOOL(acg_enable, hw->acg_enable);
CHECK_BOOL(reload_ctl, (hsotg->hw_params.snpsid > DWC2_CORE_REV_2_92a));
CHECK_BOOL(lpm, (hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_80a));
@@ -688,6 +696,9 @@ static void dwc2_get_dev_hwparams(struct dwc2_hsotg *hsotg)
/**
* During device initialization, read various hardware configuration
* registers and interpret the contents.
*
* @hsotg: Programming view of the DWC_otg controller
*
*/
int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
{
@@ -772,6 +783,7 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
hw->utmi_phy_data_width = (hwcfg4 & GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK) >>
GHWCFG4_UTMI_PHY_DATA_WIDTH_SHIFT;
hw->acg_enable = !!(hwcfg4 & GHWCFG4_ACG_SUPPORTED);
hw->ipg_isoc_en = !!(hwcfg4 & GHWCFG4_IPG_ISOC_SUPPORTED);
/* fifo sizes */
hw->rx_fifo_size = (grxfsiz & GRXFSIZ_DEPTH_MASK) >>


@@ -77,6 +77,12 @@ static int dwc2_pci_quirks(struct pci_dev *pdev, struct platform_device *dwc2)
return 0;
}
/**
* dwc2_pci_probe() - Provides the cleanup entry points for the DWC_otg PCI
* driver
*
* @pci: The programming view of DWC_otg PCI
*/
static void dwc2_pci_remove(struct pci_dev *pci)
{
struct dwc2_pci_glue *glue = pci_get_drvdata(pci);


@@ -106,4 +106,16 @@ config USB_DWC3_ST
inside (i.e. STiH407).
Say 'Y' or 'M' if you have one such device.
config USB_DWC3_QCOM
tristate "Qualcomm Platform"
depends on ARCH_QCOM || COMPILE_TEST
depends on OF
default USB_DWC3
help
Some Qualcomm SoCs use DesignWare Core IP for USB2/3
functionality.
This driver also handles Qscratch wrapper which is needed
for peripheral mode support.
Say 'Y' or 'M' if you have one such device.
endif


@@ -48,3 +48,4 @@ obj-$(CONFIG_USB_DWC3_PCI) += dwc3-pci.o
obj-$(CONFIG_USB_DWC3_KEYSTONE) += dwc3-keystone.o
obj-$(CONFIG_USB_DWC3_OF_SIMPLE) += dwc3-of-simple.o
obj-$(CONFIG_USB_DWC3_ST) += dwc3-st.o
obj-$(CONFIG_USB_DWC3_QCOM) += dwc3-qcom.o


@@ -8,6 +8,7 @@
* Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*/
#include <linux/clk.h>
#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
@@ -24,6 +25,7 @@
#include <linux/of.h>
#include <linux/acpi.h>
#include <linux/pinctrl/consumer.h>
#include <linux/reset.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
@@ -266,6 +268,12 @@ done:
return 0;
}
static const struct clk_bulk_data dwc3_core_clks[] = {
{ .id = "ref" },
{ .id = "bus_early" },
{ .id = "suspend" },
};
/*
* dwc3_frame_length_adjustment - Adjusts frame length if required
* @dwc3: Pointer to our controller context structure
@@ -667,6 +675,9 @@ static void dwc3_core_exit(struct dwc3 *dwc)
usb_phy_set_suspend(dwc->usb3_phy, 1);
phy_power_off(dwc->usb2_generic_phy);
phy_power_off(dwc->usb3_generic_phy);
clk_bulk_disable(dwc->num_clks, dwc->clks);
clk_bulk_unprepare(dwc->num_clks, dwc->clks);
reset_control_assert(dwc->reset);
}
static bool dwc3_core_is_valid(struct dwc3 *dwc)
@@ -1245,7 +1256,7 @@ static void dwc3_check_params(struct dwc3 *dwc)
static int dwc3_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *res;
struct resource *res, dwc_res;
struct dwc3 *dwc;
int ret;
@@ -1256,6 +1267,12 @@ static int dwc3_probe(struct platform_device *pdev)
if (!dwc)
return -ENOMEM;
dwc->clks = devm_kmemdup(dev, dwc3_core_clks, sizeof(dwc3_core_clks),
GFP_KERNEL);
if (!dwc->clks)
return -ENOMEM;
dwc->num_clks = ARRAY_SIZE(dwc3_core_clks);
dwc->dev = dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1270,23 +1287,48 @@ static int dwc3_probe(struct platform_device *pdev)
dwc->xhci_resources[0].flags = res->flags;
dwc->xhci_resources[0].name = res->name;
res->start += DWC3_GLOBALS_REGS_START;
/*
* Request memory region but exclude xHCI regs,
* since it will be requested by the xhci-plat driver.
*/
regs = devm_ioremap_resource(dev, res);
if (IS_ERR(regs)) {
ret = PTR_ERR(regs);
goto err0;
}
dwc_res = *res;
dwc_res.start += DWC3_GLOBALS_REGS_START;
regs = devm_ioremap_resource(dev, &dwc_res);
if (IS_ERR(regs))
return PTR_ERR(regs);
dwc->regs = regs;
dwc->regs_size = resource_size(res);
dwc->regs_size = resource_size(&dwc_res);
dwc3_get_properties(dwc);
dwc->reset = devm_reset_control_get_optional_shared(dev, NULL);
if (IS_ERR(dwc->reset))
return PTR_ERR(dwc->reset);
ret = clk_bulk_get(dev, dwc->num_clks, dwc->clks);
if (ret == -EPROBE_DEFER)
return ret;
/*
* Clocks are optional, but new DT platforms should support all clocks
* as required by the DT-binding.
*/
if (ret)
dwc->num_clks = 0;
ret = reset_control_deassert(dwc->reset);
if (ret)
goto put_clks;
ret = clk_bulk_prepare(dwc->num_clks, dwc->clks);
if (ret)
goto assert_reset;
ret = clk_bulk_enable(dwc->num_clks, dwc->clks);
if (ret)
goto unprepare_clks;
platform_set_drvdata(pdev, dwc);
dwc3_cache_hwparams(dwc);
@@ -1350,13 +1392,13 @@ err1:
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
err0:
/*
* restore res->start back to its original value so that, in case the
* probe is deferred, we don't end up getting error in request the
* memory region the next time probe is called.
*/
res->start -= DWC3_GLOBALS_REGS_START;
clk_bulk_disable(dwc->num_clks, dwc->clks);
unprepare_clks:
clk_bulk_unprepare(dwc->num_clks, dwc->clks);
assert_reset:
reset_control_assert(dwc->reset);
put_clks:
clk_bulk_put(dwc->num_clks, dwc->clks);
return ret;
}
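Aside, not part of the diff: a condensed sketch of the bring-up/unwind ordering the reworked probe path above follows (deassert the reset, then prepare and enable the bulk clocks, and back out in reverse order on any failure). example_power_on() and its parameters are illustrative names, not dwc3 code:
#include <linux/clk.h>
#include <linux/reset.h>

static int example_power_on(struct reset_control *reset,
			    int num_clks, struct clk_bulk_data *clks)
{
	int ret;

	ret = reset_control_deassert(reset);
	if (ret)
		return ret;

	ret = clk_bulk_prepare(num_clks, clks);
	if (ret)
		goto assert_reset;

	ret = clk_bulk_enable(num_clks, clks);
	if (ret)
		goto unprepare_clks;

	return 0;

unprepare_clks:
	clk_bulk_unprepare(num_clks, clks);
assert_reset:
	reset_control_assert(reset);
	return ret;
}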
@@ -1364,15 +1406,8 @@ err0:
static int dwc3_remove(struct platform_device *pdev)
{
struct dwc3 *dwc = platform_get_drvdata(pdev);
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
pm_runtime_get_sync(&pdev->dev);
/*
* restore res->start back to its original value so that, in case the
* probe is deferred, we don't end up getting error in request the
* memory region the next time probe is called.
*/
res->start -= DWC3_GLOBALS_REGS_START;
dwc3_debugfs_exit(dwc);
dwc3_core_exit_mode(dwc);
@@ -1386,14 +1421,48 @@ static int dwc3_remove(struct platform_device *pdev)
dwc3_free_event_buffers(dwc);
dwc3_free_scratch_buffers(dwc);
clk_bulk_put(dwc->num_clks, dwc->clks);
return 0;
}
#ifdef CONFIG_PM
static int dwc3_core_init_for_resume(struct dwc3 *dwc)
{
int ret;
ret = reset_control_deassert(dwc->reset);
if (ret)
return ret;
ret = clk_bulk_prepare(dwc->num_clks, dwc->clks);
if (ret)
goto assert_reset;
ret = clk_bulk_enable(dwc->num_clks, dwc->clks);
if (ret)
goto unprepare_clks;
ret = dwc3_core_init(dwc);
if (ret)
goto disable_clks;
return 0;
disable_clks:
clk_bulk_disable(dwc->num_clks, dwc->clks);
unprepare_clks:
clk_bulk_unprepare(dwc->num_clks, dwc->clks);
assert_reset:
reset_control_assert(dwc->reset);
return ret;
}
static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
{
unsigned long flags;
u32 reg;
switch (dwc->current_dr_role) {
case DWC3_GCTL_PRTCAP_DEVICE:
@@ -1403,9 +1472,25 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
dwc3_core_exit(dwc);
break;
case DWC3_GCTL_PRTCAP_HOST:
/* do nothing during host runtime_suspend */
if (!PMSG_IS_AUTO(msg))
if (!PMSG_IS_AUTO(msg)) {
dwc3_core_exit(dwc);
break;
}
/* Let controller to suspend HSPHY before PHY driver suspends */
if (dwc->dis_u2_susphy_quirk ||
dwc->dis_enblslpm_quirk) {
reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
reg |= DWC3_GUSB2PHYCFG_ENBLSLPM |
DWC3_GUSB2PHYCFG_SUSPHY;
dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
/* Give some time for USB2 PHY to suspend */
usleep_range(5000, 6000);
}
phy_pm_runtime_put_sync(dwc->usb2_generic_phy);
phy_pm_runtime_put_sync(dwc->usb3_generic_phy);
break;
case DWC3_GCTL_PRTCAP_OTG:
/* do nothing during runtime_suspend */
@@ -1433,10 +1518,11 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
{
unsigned long flags;
int ret;
u32 reg;
switch (dwc->current_dr_role) {
case DWC3_GCTL_PRTCAP_DEVICE:
ret = dwc3_core_init(dwc);
ret = dwc3_core_init_for_resume(dwc);
if (ret)
return ret;
@@ -1446,13 +1532,25 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
spin_unlock_irqrestore(&dwc->lock, flags);
break;
case DWC3_GCTL_PRTCAP_HOST:
/* nothing to do on host runtime_resume */
if (!PMSG_IS_AUTO(msg)) {
ret = dwc3_core_init(dwc);
ret = dwc3_core_init_for_resume(dwc);
if (ret)
return ret;
dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST);
break;
}
/* Restore GUSB2PHYCFG bits that were modified in suspend */
reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
if (dwc->dis_u2_susphy_quirk)
reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
if (dwc->dis_enblslpm_quirk)
reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM;
dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
phy_pm_runtime_get_sync(dwc->usb2_generic_phy);
phy_pm_runtime_get_sync(dwc->usb3_generic_phy);
break;
case DWC3_GCTL_PRTCAP_OTG:
/* nothing to do on runtime_resume */


@@ -639,8 +639,6 @@ struct dwc3_event_buffer {
* @resource_index: Resource transfer index
* @frame_number: set to the frame number we want this transfer to start (ISOC)
* @interval: the interval on which the ISOC transfer is started
* @allocated_requests: number of requests allocated
* @queued_requests: number of requests queued for transfer
* @name: a human readable name e.g. ep1out-bulk
* @direction: true for TX, false for RX
* @stream_capable: true when streams are enabled
@@ -664,11 +662,9 @@ struct dwc3_ep {
#define DWC3_EP_ENABLED BIT(0)
#define DWC3_EP_STALL BIT(1)
#define DWC3_EP_WEDGE BIT(2)
#define DWC3_EP_BUSY BIT(4)
#define DWC3_EP_TRANSFER_STARTED BIT(3)
#define DWC3_EP_PENDING_REQUEST BIT(5)
#define DWC3_EP_MISSED_ISOC BIT(6)
#define DWC3_EP_END_TRANSFER_PENDING BIT(7)
#define DWC3_EP_TRANSFER_STARTED BIT(8)
/* This last one is specific to EP0 */
#define DWC3_EP0_DIR_IN BIT(31)
@@ -688,8 +684,6 @@ struct dwc3_ep {
u8 number;
u8 type;
u8 resource_index;
u32 allocated_requests;
u32 queued_requests;
u32 frame_number;
u32 interval;
@@ -832,7 +826,9 @@ struct dwc3_hwparams {
* @list: a list_head used for request queueing
* @dep: struct dwc3_ep owning this request
* @sg: pointer to first incomplete sg
* @start_sg: pointer to the sg which should be queued next
* @num_pending_sgs: counter to pending sgs
* @num_queued_sgs: counter to the number of sgs which already got queued
* @remaining: amount of data remaining
* @epnum: endpoint number to which this request refers
* @trb: pointer to struct dwc3_trb
@@ -848,8 +844,10 @@ struct dwc3_request {
struct list_head list;
struct dwc3_ep *dep;
struct scatterlist *sg;
struct scatterlist *start_sg;
unsigned num_pending_sgs;
unsigned int num_queued_sgs;
unsigned remaining;
u8 epnum;
struct dwc3_trb *trb;
@@ -891,6 +889,9 @@ struct dwc3_scratchpad_array {
* @eps: endpoint array
* @gadget: device side representation of the peripheral controller
* @gadget_driver: pointer to the gadget driver
* @clks: array of clocks
* @num_clks: number of clocks
* @reset: reset control
* @regs: base address for our registers
* @regs_size: address space size
* @fladj: frame length adjustment
@@ -1013,6 +1014,11 @@ struct dwc3 {
struct usb_gadget gadget;
struct usb_gadget_driver *gadget_driver;
struct clk_bulk_data *clks;
int num_clks;
struct reset_control *reset;
struct usb_phy *usb2_phy;
struct usb_phy *usb3_phy;
@@ -1197,11 +1203,12 @@ struct dwc3_event_depevt {
/* Within XferNotReady */
#define DEPEVT_STATUS_TRANSFER_ACTIVE BIT(3)
/* Within XferComplete */
/* Within XferComplete or XferInProgress */
#define DEPEVT_STATUS_BUSERR BIT(0)
#define DEPEVT_STATUS_SHORT BIT(1)
#define DEPEVT_STATUS_IOC BIT(2)
#define DEPEVT_STATUS_LST BIT(3)
#define DEPEVT_STATUS_LST BIT(3) /* XferComplete */
#define DEPEVT_STATUS_MISSED_ISOC BIT(3) /* XferInProgress */
/* Stream event only */
#define DEPEVT_STREAMEVT_FOUND 1
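Aside, not part of the diff: the split comments above capture that the same status bit is reported as LST for XferComplete but as MISSED_ISOC for XferInProgress. A tiny standalone decode; decode_status_bit3() is an illustrative helper, not a dwc3 function:
#include <stdio.h>

static const char *decode_status_bit3(int is_xfercomplete, unsigned int status)
{
	if (!(status & (1u << 3)))
		return "bit 3 clear";
	/* same bit, different meaning depending on the event type */
	return is_xfercomplete ? "LST (last TRB)" : "MISSED_ISOC";
}

int main(void)
{
	printf("XferComplete:   %s\n", decode_status_bit3(1, 0x8));
	printf("XferInProgress: %s\n", decode_status_bit3(0, 0x8));
	return 0;
}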


@@ -475,21 +475,37 @@ dwc3_ep_event_string(char *str, const struct dwc3_event_depevt *event,
if (ret < 0)
return "UNKNOWN";
status = event->status;
switch (event->endpoint_event) {
case DWC3_DEPEVT_XFERCOMPLETE:
strcat(str, "Transfer Complete");
len = strlen(str);
sprintf(str + len, "Transfer Complete (%c%c%c)",
status & DEPEVT_STATUS_SHORT ? 'S' : 's',
status & DEPEVT_STATUS_IOC ? 'I' : 'i',
status & DEPEVT_STATUS_LST ? 'L' : 'l');
len = strlen(str);
if (epnum <= 1)
sprintf(str + len, " [%s]", dwc3_ep0_state_string(ep0state));
break;
case DWC3_DEPEVT_XFERINPROGRESS:
strcat(str, "Transfer In-Progress");
len = strlen(str);
sprintf(str + len, "Transfer In Progress [%d] (%c%c%c)",
event->parameters,
status & DEPEVT_STATUS_SHORT ? 'S' : 's',
status & DEPEVT_STATUS_IOC ? 'I' : 'i',
status & DEPEVT_STATUS_LST ? 'M' : 'm');
break;
case DWC3_DEPEVT_XFERNOTREADY:
strcat(str, "Transfer Not Ready");
status = event->status & DEPEVT_STATUS_TRANSFER_ACTIVE;
strcat(str, status ? " (Active)" : " (Not Active)");
len = strlen(str);
sprintf(str + len, "Transfer Not Ready [%d]%s",
event->parameters,
status & DEPEVT_STATUS_TRANSFER_ACTIVE ?
" (Active)" : " (Not Active)");
/* Control Endpoints */
if (epnum <= 1) {


@@ -716,9 +716,6 @@ static void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep,
struct dentry *dir;
dir = debugfs_create_dir(dep->name, parent);
if (IS_ERR_OR_NULL(dir))
return;
dwc3_debugfs_create_endpoint_files(dep, dir);
}
@@ -740,49 +737,31 @@ static void dwc3_debugfs_create_endpoint_dirs(struct dwc3 *dwc,
void dwc3_debugfs_init(struct dwc3 *dwc)
{
struct dentry *root;
struct dentry *file;
root = debugfs_create_dir(dev_name(dwc->dev), NULL);
if (IS_ERR_OR_NULL(root)) {
if (!root)
dev_err(dwc->dev, "Can't create debugfs root\n");
return;
}
dwc->root = root;
dwc->regset = kzalloc(sizeof(*dwc->regset), GFP_KERNEL);
if (!dwc->regset) {
debugfs_remove_recursive(root);
if (!dwc->regset)
return;
}
dwc->regset->regs = dwc3_regs;
dwc->regset->nregs = ARRAY_SIZE(dwc3_regs);
dwc->regset->base = dwc->regs - DWC3_GLOBALS_REGS_START;
file = debugfs_create_regset32("regdump", S_IRUGO, root, dwc->regset);
if (!file)
dev_dbg(dwc->dev, "Can't create debugfs regdump\n");
root = debugfs_create_dir(dev_name(dwc->dev), NULL);
dwc->root = root;
debugfs_create_regset32("regdump", S_IRUGO, root, dwc->regset);
if (IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)) {
file = debugfs_create_file("mode", S_IRUGO | S_IWUSR, root,
dwc, &dwc3_mode_fops);
if (!file)
dev_dbg(dwc->dev, "Can't create debugfs mode\n");
debugfs_create_file("mode", S_IRUGO | S_IWUSR, root, dwc,
&dwc3_mode_fops);
}
if (IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE) ||
IS_ENABLED(CONFIG_USB_DWC3_GADGET)) {
file = debugfs_create_file("testmode", S_IRUGO | S_IWUSR, root,
dwc, &dwc3_testmode_fops);
if (!file)
dev_dbg(dwc->dev, "Can't create debugfs testmode\n");
file = debugfs_create_file("link_state", S_IRUGO | S_IWUSR,
root, dwc, &dwc3_link_state_fops);
if (!file)
dev_dbg(dwc->dev, "Can't create debugfs link_state\n");
debugfs_create_file("testmode", S_IRUGO | S_IWUSR, root, dwc,
&dwc3_testmode_fops);
debugfs_create_file("link_state", S_IRUGO | S_IWUSR, root, dwc,
&dwc3_link_state_fops);
dwc3_debugfs_create_endpoint_dirs(dwc, root);
}
}


@@ -8,6 +8,7 @@
*/
#include <linux/extcon.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include "debug.h"
@@ -439,17 +440,38 @@ static int dwc3_drd_notifier(struct notifier_block *nb,
return NOTIFY_DONE;
}
static struct extcon_dev *dwc3_get_extcon(struct dwc3 *dwc)
{
struct device *dev = dwc->dev;
struct device_node *np_phy, *np_conn;
struct extcon_dev *edev;
if (of_property_read_bool(dev->of_node, "extcon"))
return extcon_get_edev_by_phandle(dwc->dev, 0);
np_phy = of_parse_phandle(dev->of_node, "phys", 0);
np_conn = of_graph_get_remote_node(np_phy, -1, -1);
if (np_conn)
edev = extcon_find_edev_by_node(np_conn);
else
edev = NULL;
of_node_put(np_conn);
of_node_put(np_phy);
return edev;
}
int dwc3_drd_init(struct dwc3 *dwc)
{
int ret, irq;
if (dwc->dev->of_node &&
of_property_read_bool(dwc->dev->of_node, "extcon")) {
dwc->edev = extcon_get_edev_by_phandle(dwc->dev, 0);
if (IS_ERR(dwc->edev))
return PTR_ERR(dwc->edev);
dwc->edev = dwc3_get_extcon(dwc);
if (IS_ERR(dwc->edev))
return PTR_ERR(dwc->edev);
if (dwc->edev) {
dwc->edev_nb.notifier_call = dwc3_drd_notifier;
ret = extcon_register_notifier(dwc->edev, EXTCON_USB_HOST,
&dwc->edev_nb);


@@ -208,13 +208,13 @@ static const struct dev_pm_ops dwc3_of_simple_dev_pm_ops = {
};
static const struct of_device_id of_dwc3_simple_match[] = {
{ .compatible = "qcom,dwc3" },
{ .compatible = "rockchip,rk3399-dwc3" },
{ .compatible = "xlnx,zynqmp-dwc3" },
{ .compatible = "cavium,octeon-7130-usb-uctl" },
{ .compatible = "sprd,sc9860-dwc3" },
{ .compatible = "amlogic,meson-axg-dwc3" },
{ .compatible = "amlogic,meson-gxl-dwc3" },
{ .compatible = "allwinner,sun50i-h6-dwc3" },
{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, of_dwc3_simple_match);


@@ -0,0 +1,619 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* Inspired by dwc3-of-simple.c
*/
#include <linux/io.h>
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/clk-provider.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/extcon.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/usb/of.h>
#include <linux/reset.h>
#include <linux/iopoll.h>
#include "core.h"
/* USB QSCRATCH Hardware registers */
#define QSCRATCH_HS_PHY_CTRL 0x10
#define UTMI_OTG_VBUS_VALID BIT(20)
#define SW_SESSVLD_SEL BIT(28)
#define QSCRATCH_SS_PHY_CTRL 0x30
#define LANE0_PWR_PRESENT BIT(24)
#define QSCRATCH_GENERAL_CFG 0x08
#define PIPE_UTMI_CLK_SEL BIT(0)
#define PIPE3_PHYSTATUS_SW BIT(3)
#define PIPE_UTMI_CLK_DIS BIT(8)
#define PWR_EVNT_IRQ_STAT_REG 0x58
#define PWR_EVNT_LPM_IN_L2_MASK BIT(4)
#define PWR_EVNT_LPM_OUT_L2_MASK BIT(5)
struct dwc3_qcom {
struct device *dev;
void __iomem *qscratch_base;
struct platform_device *dwc3;
struct clk **clks;
int num_clocks;
struct reset_control *resets;
int hs_phy_irq;
int dp_hs_phy_irq;
int dm_hs_phy_irq;
int ss_phy_irq;
struct extcon_dev *edev;
struct extcon_dev *host_edev;
struct notifier_block vbus_nb;
struct notifier_block host_nb;
enum usb_dr_mode mode;
bool is_suspended;
bool pm_suspended;
};
static inline void dwc3_qcom_setbits(void __iomem *base, u32 offset, u32 val)
{
u32 reg;
reg = readl(base + offset);
reg |= val;
writel(reg, base + offset);
/* ensure that the above write has completed */
readl(base + offset);
}
static inline void dwc3_qcom_clrbits(void __iomem *base, u32 offset, u32 val)
{
u32 reg;
reg = readl(base + offset);
reg &= ~val;
writel(reg, base + offset);
/* ensure that the above write has completed */
readl(base + offset);
}
static void dwc3_qcom_vbus_overrride_enable(struct dwc3_qcom *qcom, bool enable)
{
if (enable) {
dwc3_qcom_setbits(qcom->qscratch_base, QSCRATCH_SS_PHY_CTRL,
LANE0_PWR_PRESENT);
dwc3_qcom_setbits(qcom->qscratch_base, QSCRATCH_HS_PHY_CTRL,
UTMI_OTG_VBUS_VALID | SW_SESSVLD_SEL);
} else {
dwc3_qcom_clrbits(qcom->qscratch_base, QSCRATCH_SS_PHY_CTRL,
LANE0_PWR_PRESENT);
dwc3_qcom_clrbits(qcom->qscratch_base, QSCRATCH_HS_PHY_CTRL,
UTMI_OTG_VBUS_VALID | SW_SESSVLD_SEL);
}
}
static int dwc3_qcom_vbus_notifier(struct notifier_block *nb,
unsigned long event, void *ptr)
{
struct dwc3_qcom *qcom = container_of(nb, struct dwc3_qcom, vbus_nb);
/* enable vbus override for device mode */
dwc3_qcom_vbus_overrride_enable(qcom, event);
qcom->mode = event ? USB_DR_MODE_PERIPHERAL : USB_DR_MODE_HOST;
return NOTIFY_DONE;
}
static int dwc3_qcom_host_notifier(struct notifier_block *nb,
unsigned long event, void *ptr)
{
struct dwc3_qcom *qcom = container_of(nb, struct dwc3_qcom, host_nb);
/* disable vbus override in host mode */
dwc3_qcom_vbus_overrride_enable(qcom, !event);
qcom->mode = event ? USB_DR_MODE_HOST : USB_DR_MODE_PERIPHERAL;
return NOTIFY_DONE;
}
static int dwc3_qcom_register_extcon(struct dwc3_qcom *qcom)
{
struct device *dev = qcom->dev;
struct extcon_dev *host_edev;
int ret;
if (!of_property_read_bool(dev->of_node, "extcon"))
return 0;
qcom->edev = extcon_get_edev_by_phandle(dev, 0);
if (IS_ERR(qcom->edev))
return PTR_ERR(qcom->edev);
qcom->vbus_nb.notifier_call = dwc3_qcom_vbus_notifier;
qcom->host_edev = extcon_get_edev_by_phandle(dev, 1);
if (IS_ERR(qcom->host_edev))
qcom->host_edev = NULL;
ret = devm_extcon_register_notifier(dev, qcom->edev, EXTCON_USB,
&qcom->vbus_nb);
if (ret < 0) {
dev_err(dev, "VBUS notifier register failed\n");
return ret;
}
if (qcom->host_edev)
host_edev = qcom->host_edev;
else
host_edev = qcom->edev;
qcom->host_nb.notifier_call = dwc3_qcom_host_notifier;
ret = devm_extcon_register_notifier(dev, host_edev, EXTCON_USB_HOST,
&qcom->host_nb);
if (ret < 0) {
dev_err(dev, "Host notifier register failed\n");
return ret;
}
/* Update initial VBUS override based on extcon state */
if (extcon_get_state(qcom->edev, EXTCON_USB) ||
!extcon_get_state(host_edev, EXTCON_USB_HOST))
dwc3_qcom_vbus_notifier(&qcom->vbus_nb, true, qcom->edev);
else
dwc3_qcom_vbus_notifier(&qcom->vbus_nb, false, qcom->edev);
return 0;
}
static void dwc3_qcom_disable_interrupts(struct dwc3_qcom *qcom)
{
if (qcom->hs_phy_irq) {
disable_irq_wake(qcom->hs_phy_irq);
disable_irq_nosync(qcom->hs_phy_irq);
}
if (qcom->dp_hs_phy_irq) {
disable_irq_wake(qcom->dp_hs_phy_irq);
disable_irq_nosync(qcom->dp_hs_phy_irq);
}
if (qcom->dm_hs_phy_irq) {
disable_irq_wake(qcom->dm_hs_phy_irq);
disable_irq_nosync(qcom->dm_hs_phy_irq);
}
if (qcom->ss_phy_irq) {
disable_irq_wake(qcom->ss_phy_irq);
disable_irq_nosync(qcom->ss_phy_irq);
}
}
static void dwc3_qcom_enable_interrupts(struct dwc3_qcom *qcom)
{
if (qcom->hs_phy_irq) {
enable_irq(qcom->hs_phy_irq);
enable_irq_wake(qcom->hs_phy_irq);
}
if (qcom->dp_hs_phy_irq) {
enable_irq(qcom->dp_hs_phy_irq);
enable_irq_wake(qcom->dp_hs_phy_irq);
}
if (qcom->dm_hs_phy_irq) {
enable_irq(qcom->dm_hs_phy_irq);
enable_irq_wake(qcom->dm_hs_phy_irq);
}
if (qcom->ss_phy_irq) {
enable_irq(qcom->ss_phy_irq);
enable_irq_wake(qcom->ss_phy_irq);
}
}
static int dwc3_qcom_suspend(struct dwc3_qcom *qcom)
{
u32 val;
int i;
if (qcom->is_suspended)
return 0;
val = readl(qcom->qscratch_base + PWR_EVNT_IRQ_STAT_REG);
if (!(val & PWR_EVNT_LPM_IN_L2_MASK))
dev_err(qcom->dev, "HS-PHY not in L2\n");
for (i = qcom->num_clocks - 1; i >= 0; i--)
clk_disable_unprepare(qcom->clks[i]);
qcom->is_suspended = true;
dwc3_qcom_enable_interrupts(qcom);
return 0;
}
static int dwc3_qcom_resume(struct dwc3_qcom *qcom)
{
int ret;
int i;
if (!qcom->is_suspended)
return 0;
dwc3_qcom_disable_interrupts(qcom);
for (i = 0; i < qcom->num_clocks; i++) {
ret = clk_prepare_enable(qcom->clks[i]);
if (ret < 0) {
while (--i >= 0)
clk_disable_unprepare(qcom->clks[i]);
return ret;
}
}
/* Clear existing events from PHY related to L2 in/out */
dwc3_qcom_setbits(qcom->qscratch_base, PWR_EVNT_IRQ_STAT_REG,
PWR_EVNT_LPM_IN_L2_MASK | PWR_EVNT_LPM_OUT_L2_MASK);
qcom->is_suspended = false;
return 0;
}
static irqreturn_t qcom_dwc3_resume_irq(int irq, void *data)
{
struct dwc3_qcom *qcom = data;
struct dwc3 *dwc = platform_get_drvdata(qcom->dwc3);
/* If pm_suspended then let pm_resume take care of resuming h/w */
if (qcom->pm_suspended)
return IRQ_HANDLED;
if (dwc->xhci)
pm_runtime_resume(&dwc->xhci->dev);
return IRQ_HANDLED;
}
static void dwc3_qcom_select_utmi_clk(struct dwc3_qcom *qcom)
{
/* Configure dwc3 to use the UTMI clock, as the PIPE clock is not present */
dwc3_qcom_setbits(qcom->qscratch_base, QSCRATCH_GENERAL_CFG,
PIPE_UTMI_CLK_DIS);
usleep_range(100, 1000);
dwc3_qcom_setbits(qcom->qscratch_base, QSCRATCH_GENERAL_CFG,
PIPE_UTMI_CLK_SEL | PIPE3_PHYSTATUS_SW);
usleep_range(100, 1000);
dwc3_qcom_clrbits(qcom->qscratch_base, QSCRATCH_GENERAL_CFG,
PIPE_UTMI_CLK_DIS);
}
static int dwc3_qcom_setup_irq(struct platform_device *pdev)
{
struct dwc3_qcom *qcom = platform_get_drvdata(pdev);
int irq, ret;
irq = platform_get_irq_byname(pdev, "hs_phy_irq");
if (irq > 0) {
/* Keep wakeup interrupts disabled until suspend */
irq_set_status_flags(irq, IRQ_NOAUTOEN);
ret = devm_request_threaded_irq(qcom->dev, irq, NULL,
qcom_dwc3_resume_irq,
IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
"qcom_dwc3 HS", qcom);
if (ret) {
dev_err(qcom->dev, "hs_phy_irq failed: %d\n", ret);
return ret;
}
qcom->hs_phy_irq = irq;
}
irq = platform_get_irq_byname(pdev, "dp_hs_phy_irq");
if (irq > 0) {
irq_set_status_flags(irq, IRQ_NOAUTOEN);
ret = devm_request_threaded_irq(qcom->dev, irq, NULL,
qcom_dwc3_resume_irq,
IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
"qcom_dwc3 DP_HS", qcom);
if (ret) {
dev_err(qcom->dev, "dp_hs_phy_irq failed: %d\n", ret);
return ret;
}
qcom->dp_hs_phy_irq = irq;
}
irq = platform_get_irq_byname(pdev, "dm_hs_phy_irq");
if (irq > 0) {
irq_set_status_flags(irq, IRQ_NOAUTOEN);
ret = devm_request_threaded_irq(qcom->dev, irq, NULL,
qcom_dwc3_resume_irq,
IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
"qcom_dwc3 DM_HS", qcom);
if (ret) {
dev_err(qcom->dev, "dm_hs_phy_irq failed: %d\n", ret);
return ret;
}
qcom->dm_hs_phy_irq = irq;
}
irq = platform_get_irq_byname(pdev, "ss_phy_irq");
if (irq > 0) {
irq_set_status_flags(irq, IRQ_NOAUTOEN);
ret = devm_request_threaded_irq(qcom->dev, irq, NULL,
qcom_dwc3_resume_irq,
IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
"qcom_dwc3 SS", qcom);
if (ret) {
dev_err(qcom->dev, "ss_phy_irq failed: %d\n", ret);
return ret;
}
qcom->ss_phy_irq = irq;
}
return 0;
}
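Aside, not part of the diff: the four IRQ blocks above all follow the same wakeup-interrupt lifecycle, requested with IRQ_NOAUTOEN so the line stays off while the controller runs, armed and marked wake-capable only at suspend, and disarmed again at resume. A condensed sketch with illustrative example_* names:
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/platform_device.h>

static irqreturn_t example_wake_irq(int irq, void *data)
{
	/* resume the wrapped controller here */
	return IRQ_HANDLED;
}

/* Returns the IRQ number on success so the caller can arm/disarm it later. */
static int example_setup_wake_irq(struct platform_device *pdev, void *priv)
{
	int ret, irq = platform_get_irq_byname(pdev, "dp_hs_phy_irq");

	if (irq <= 0)
		return irq ? irq : -ENODEV;

	irq_set_status_flags(irq, IRQ_NOAUTOEN);
	ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
					example_wake_irq,
					IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
					"example wake", priv);
	return ret ? ret : irq;
}

static void example_arm_wake_irq(int irq)	/* called from suspend */
{
	enable_irq(irq);
	enable_irq_wake(irq);
}

static void example_disarm_wake_irq(int irq)	/* called from resume */
{
	disable_irq_wake(irq);
	disable_irq_nosync(irq);
}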
static int dwc3_qcom_clk_init(struct dwc3_qcom *qcom, int count)
{
struct device *dev = qcom->dev;
struct device_node *np = dev->of_node;
int i;
qcom->num_clocks = count;
if (!count)
return 0;
qcom->clks = devm_kcalloc(dev, qcom->num_clocks,
sizeof(struct clk *), GFP_KERNEL);
if (!qcom->clks)
return -ENOMEM;
for (i = 0; i < qcom->num_clocks; i++) {
struct clk *clk;
int ret;
clk = of_clk_get(np, i);
if (IS_ERR(clk)) {
while (--i >= 0)
clk_put(qcom->clks[i]);
return PTR_ERR(clk);
}
ret = clk_prepare_enable(clk);
if (ret < 0) {
while (--i >= 0) {
clk_disable_unprepare(qcom->clks[i]);
clk_put(qcom->clks[i]);
}
clk_put(clk);
return ret;
}
qcom->clks[i] = clk;
}
return 0;
}
static int dwc3_qcom_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node, *dwc3_np;
struct device *dev = &pdev->dev;
struct dwc3_qcom *qcom;
struct resource *res;
int ret, i;
bool ignore_pipe_clk;
qcom = devm_kzalloc(&pdev->dev, sizeof(*qcom), GFP_KERNEL);
if (!qcom)
return -ENOMEM;
platform_set_drvdata(pdev, qcom);
qcom->dev = &pdev->dev;
qcom->resets = devm_reset_control_array_get_optional_exclusive(dev);
if (IS_ERR(qcom->resets)) {
ret = PTR_ERR(qcom->resets);
dev_err(&pdev->dev, "failed to get resets, err=%d\n", ret);
return ret;
}
ret = reset_control_assert(qcom->resets);
if (ret) {
dev_err(&pdev->dev, "failed to assert resets, err=%d\n", ret);
return ret;
}
usleep_range(10, 1000);
ret = reset_control_deassert(qcom->resets);
if (ret) {
dev_err(&pdev->dev, "failed to deassert resets, err=%d\n", ret);
goto reset_assert;
}
ret = dwc3_qcom_clk_init(qcom, of_count_phandle_with_args(np,
"clocks", "#clock-cells"));
if (ret) {
dev_err(dev, "failed to get clocks\n");
goto reset_assert;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
qcom->qscratch_base = devm_ioremap_resource(dev, res);
if (IS_ERR(qcom->qscratch_base)) {
dev_err(dev, "failed to map qscratch, err=%d\n", ret);
ret = PTR_ERR(qcom->qscratch_base);
goto clk_disable;
}
ret = dwc3_qcom_setup_irq(pdev);
if (ret)
goto clk_disable;
dwc3_np = of_get_child_by_name(np, "dwc3");
if (!dwc3_np) {
dev_err(dev, "failed to find dwc3 core child\n");
ret = -ENODEV;
goto clk_disable;
}
/*
* Disable pipe_clk requirement if specified. Used when dwc3
* operates without SSPHY and only HS/FS/LS modes are supported.
*/
ignore_pipe_clk = device_property_read_bool(dev,
"qcom,select-utmi-as-pipe-clk");
if (ignore_pipe_clk)
dwc3_qcom_select_utmi_clk(qcom);
ret = of_platform_populate(np, NULL, NULL, dev);
if (ret) {
dev_err(dev, "failed to register dwc3 core - %d\n", ret);
goto clk_disable;
}
qcom->dwc3 = of_find_device_by_node(dwc3_np);
if (!qcom->dwc3) {
dev_err(&pdev->dev, "failed to get dwc3 platform device\n");
goto depopulate;
}
qcom->mode = usb_get_dr_mode(&qcom->dwc3->dev);
/* enable vbus override for device mode */
if (qcom->mode == USB_DR_MODE_PERIPHERAL)
dwc3_qcom_vbus_overrride_enable(qcom, true);
/* register extcon to override sw_vbus on Vbus change later */
ret = dwc3_qcom_register_extcon(qcom);
if (ret)
goto depopulate;
device_init_wakeup(&pdev->dev, 1);
qcom->is_suspended = false;
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
pm_runtime_forbid(dev);
return 0;
depopulate:
of_platform_depopulate(&pdev->dev);
clk_disable:
for (i = qcom->num_clocks - 1; i >= 0; i--) {
clk_disable_unprepare(qcom->clks[i]);
clk_put(qcom->clks[i]);
}
reset_assert:
reset_control_assert(qcom->resets);
return ret;
}
static int dwc3_qcom_remove(struct platform_device *pdev)
{
struct dwc3_qcom *qcom = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
int i;
of_platform_depopulate(dev);
for (i = qcom->num_clocks - 1; i >= 0; i--) {
clk_disable_unprepare(qcom->clks[i]);
clk_put(qcom->clks[i]);
}
qcom->num_clocks = 0;
reset_control_assert(qcom->resets);
pm_runtime_allow(dev);
pm_runtime_disable(dev);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int dwc3_qcom_pm_suspend(struct device *dev)
{
struct dwc3_qcom *qcom = dev_get_drvdata(dev);
int ret = 0;
ret = dwc3_qcom_suspend(qcom);
if (!ret)
qcom->pm_suspended = true;
return ret;
}
static int dwc3_qcom_pm_resume(struct device *dev)
{
struct dwc3_qcom *qcom = dev_get_drvdata(dev);
int ret;
ret = dwc3_qcom_resume(qcom);
if (!ret)
qcom->pm_suspended = false;
return ret;
}
#endif
#ifdef CONFIG_PM
static int dwc3_qcom_runtime_suspend(struct device *dev)
{
struct dwc3_qcom *qcom = dev_get_drvdata(dev);
return dwc3_qcom_suspend(qcom);
}
static int dwc3_qcom_runtime_resume(struct device *dev)
{
struct dwc3_qcom *qcom = dev_get_drvdata(dev);
return dwc3_qcom_resume(qcom);
}
#endif
static const struct dev_pm_ops dwc3_qcom_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(dwc3_qcom_pm_suspend, dwc3_qcom_pm_resume)
SET_RUNTIME_PM_OPS(dwc3_qcom_runtime_suspend, dwc3_qcom_runtime_resume,
NULL)
};
static const struct of_device_id dwc3_qcom_of_match[] = {
{ .compatible = "qcom,dwc3" },
{ .compatible = "qcom,msm8996-dwc3" },
{ .compatible = "qcom,sdm845-dwc3" },
{ }
};
MODULE_DEVICE_TABLE(of, dwc3_qcom_of_match);
static struct platform_driver dwc3_qcom_driver = {
.probe = dwc3_qcom_probe,
.remove = dwc3_qcom_remove,
.driver = {
.name = "dwc3-qcom",
.pm = &dwc3_qcom_dev_pm_ops,
.of_match_table = dwc3_qcom_of_match,
},
};
module_platform_driver(dwc3_qcom_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("DesignWare DWC3 QCOM Glue Driver");


@@ -66,7 +66,7 @@ static int dwc3_ep0_start_trans(struct dwc3_ep *dep)
struct dwc3 *dwc;
int ret;
if (dep->flags & DWC3_EP_BUSY)
if (dep->flags & DWC3_EP_TRANSFER_STARTED)
return 0;
dwc = dep->dwc;
@@ -79,8 +79,6 @@ static int dwc3_ep0_start_trans(struct dwc3_ep *dep)
if (ret < 0)
return ret;
dep->flags |= DWC3_EP_BUSY;
dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
dwc->ep0_next_event = DWC3_EP0_COMPLETE;
return 0;
@@ -913,7 +911,7 @@ static void dwc3_ep0_xfer_complete(struct dwc3 *dwc,
{
struct dwc3_ep *dep = dwc->eps[event->endpoint_number];
dep->flags &= ~DWC3_EP_BUSY;
dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
dep->resource_index = 0;
dwc->setup_packet_pending = false;

File diff suppressed because it is too large


@@ -98,13 +98,12 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol);
* Caller should take care of locking. Returns the transfer resource
* index for a given endpoint.
*/
static inline u32 dwc3_gadget_ep_get_transfer_index(struct dwc3_ep *dep)
static inline void dwc3_gadget_ep_get_transfer_index(struct dwc3_ep *dep)
{
u32 res_id;
res_id = dwc3_readl(dep->regs, DWC3_DEPCMD);
return DWC3_DEPCMD_GET_RSC_IDX(res_id);
dep->resource_index = DWC3_DEPCMD_GET_RSC_IDX(res_id);
}
#endif /* __DRIVERS_USB_DWC3_GADGET_H */


@@ -230,17 +230,14 @@ DECLARE_EVENT_CLASS(dwc3_log_trb,
TP_fast_assign(
__assign_str(name, dep->name);
__entry->trb = trb;
__entry->allocated = dep->allocated_requests;
__entry->queued = dep->queued_requests;
__entry->bpl = trb->bpl;
__entry->bph = trb->bph;
__entry->size = trb->size;
__entry->ctrl = trb->ctrl;
__entry->type = usb_endpoint_type(dep->endpoint.desc);
),
TP_printk("%s: %d/%d trb %p buf %08x%08x size %s%d ctrl %08x (%c%c%c%c:%c%c:%s)",
__get_str(name), __entry->queued, __entry->allocated,
__entry->trb, __entry->bph, __entry->bpl,
TP_printk("%s: trb %p buf %08x%08x size %s%d ctrl %08x (%c%c%c%c:%c%c:%s)",
__get_str(name), __entry->trb, __entry->bph, __entry->bpl,
({char *s;
int pcm = ((__entry->size >> 24) & 3) + 1;
switch (__entry->type) {
@@ -306,7 +303,7 @@ DECLARE_EVENT_CLASS(dwc3_log_ep,
__entry->trb_enqueue = dep->trb_enqueue;
__entry->trb_dequeue = dep->trb_dequeue;
),
TP_printk("%s: mps %d/%d streams %d burst %d ring %d/%d flags %c:%c%c%c%c%c:%c:%c",
TP_printk("%s: mps %d/%d streams %d burst %d ring %d/%d flags %c:%c%c%c%c:%c:%c",
__get_str(name), __entry->maxpacket,
__entry->maxpacket_limit, __entry->max_streams,
__entry->maxburst, __entry->trb_enqueue,
@@ -314,9 +311,8 @@ DECLARE_EVENT_CLASS(dwc3_log_ep,
__entry->flags & DWC3_EP_ENABLED ? 'E' : 'e',
__entry->flags & DWC3_EP_STALL ? 'S' : 's',
__entry->flags & DWC3_EP_WEDGE ? 'W' : 'w',
__entry->flags & DWC3_EP_BUSY ? 'B' : 'b',
__entry->flags & DWC3_EP_TRANSFER_STARTED ? 'B' : 'b',
__entry->flags & DWC3_EP_PENDING_REQUEST ? 'P' : 'p',
__entry->flags & DWC3_EP_MISSED_ISOC ? 'M' : 'm',
__entry->flags & DWC3_EP_END_TRANSFER_PENDING ? 'E' : 'e',
__entry->direction ? '<' : '>'
)


@@ -1601,7 +1601,7 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
cdev->gadget->ep0->maxpacket;
if (gadget_is_superspeed(gadget)) {
if (gadget->speed >= USB_SPEED_SUPER) {
cdev->desc.bcdUSB = cpu_to_le16(0x0310);
cdev->desc.bcdUSB = cpu_to_le16(0x0320);
cdev->desc.bMaxPacketSize0 = 9;
} else {
cdev->desc.bcdUSB = cpu_to_le16(0x0210);
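Aside, not part of the diff: bcdUSB is binary-coded decimal, so the bump from 0x0310 to 0x0320 advertises USB 3.20 to the host. A quick standalone check:
#include <stdio.h>

int main(void)
{
	unsigned int bcdUSB = 0x0320;

	/* each nibble is a decimal digit: 0x0320 decodes to "3.20" */
	printf("USB %x.%02x\n", bcdUSB >> 8, bcdUSB & 0xff);
	return 0;
}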


@@ -705,6 +705,8 @@ ecm_bind(struct usb_configuration *c, struct usb_function *f)
ecm_opts->bound = true;
}
ecm_string_defs[1].s = ecm->ethaddr;
us = usb_gstrings_attach(cdev, ecm_strings,
ARRAY_SIZE(ecm_string_defs));
if (IS_ERR(us))
@@ -928,7 +930,6 @@ static struct usb_function *ecm_alloc(struct usb_function_instance *fi)
mutex_unlock(&opts->lock);
return ERR_PTR(-EINVAL);
}
ecm_string_defs[1].s = ecm->ethaddr;
ecm->port.ioport = netdev_priv(opts->net);
mutex_unlock(&opts->lock);


@@ -1266,6 +1266,14 @@ static long ffs_epfile_ioctl(struct file *file, unsigned code,
return ret;
}
#ifdef CONFIG_COMPAT
static long ffs_epfile_compat_ioctl(struct file *file, unsigned code,
unsigned long value)
{
return ffs_epfile_ioctl(file, code, value);
}
#endif
static const struct file_operations ffs_epfile_operations = {
.llseek = no_llseek,
@@ -1274,6 +1282,9 @@ static const struct file_operations ffs_epfile_operations = {
.read_iter = ffs_epfile_read_iter,
.release = ffs_epfile_release,
.unlocked_ioctl = ffs_epfile_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = ffs_epfile_compat_ioctl,
#endif
};


@@ -109,6 +109,7 @@ static inline struct f_midi *func_to_midi(struct usb_function *f)
static void f_midi_transmit(struct f_midi *midi);
static void f_midi_rmidi_free(struct snd_rawmidi *rmidi);
static void f_midi_free_inst(struct usb_function_instance *f);
DECLARE_UAC_AC_HEADER_DESCRIPTOR(1);
DECLARE_USB_MIDI_OUT_JACK_DESCRIPTOR(1);
@@ -1102,7 +1103,7 @@ static ssize_t f_midi_opts_##name##_store(struct config_item *item, \
u32 num; \
\
mutex_lock(&opts->lock); \
if (opts->refcnt) { \
if (opts->refcnt > 1) { \
ret = -EBUSY; \
goto end; \
} \
@@ -1157,7 +1158,7 @@ static ssize_t f_midi_opts_id_store(struct config_item *item,
char *c;
mutex_lock(&opts->lock);
if (opts->refcnt) {
if (opts->refcnt > 1) {
ret = -EBUSY;
goto end;
}
@@ -1198,13 +1199,21 @@ static const struct config_item_type midi_func_type = {
static void f_midi_free_inst(struct usb_function_instance *f)
{
struct f_midi_opts *opts;
bool free = false;
opts = container_of(f, struct f_midi_opts, func_inst);
if (opts->id_allocated)
kfree(opts->id);
mutex_lock(&opts->lock);
if (!--opts->refcnt) {
free = true;
}
mutex_unlock(&opts->lock);
kfree(opts);
if (free) {
if (opts->id_allocated)
kfree(opts->id);
kfree(opts);
}
}
static struct usb_function_instance *f_midi_alloc_inst(void)
@@ -1223,6 +1232,7 @@ static struct usb_function_instance *f_midi_alloc_inst(void)
opts->qlen = 32;
opts->in_ports = 1;
opts->out_ports = 1;
opts->refcnt = 1;
config_group_init_type_name(&opts->func_inst.group, "",
&midi_func_type);
@@ -1234,6 +1244,7 @@ static void f_midi_free(struct usb_function *f)
{
struct f_midi *midi;
struct f_midi_opts *opts;
bool free = false;
midi = func_to_midi(f);
opts = container_of(f->fi, struct f_midi_opts, func_inst);
@@ -1242,9 +1253,12 @@ static void f_midi_free(struct usb_function *f)
kfree(midi->id);
kfifo_free(&midi->in_req_fifo);
kfree(midi);
--opts->refcnt;
free = true;
}
mutex_unlock(&opts->lock);
if (free)
f_midi_free_inst(&opts->func_inst);
}
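Aside, not part of the diff: f_midi_free() and f_midi_free_inst() above now cooperate through opts->refcnt so that only whoever drops the last reference frees the options, which is what closes the use-after-free window. A generic sketch of that pattern with illustrative example_* names:
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/types.h>

struct example_opts {
	struct mutex	lock;
	int		refcnt;		/* set to 1 when the instance is allocated */
	/* ... option fields ... */
};

static void example_opts_put(struct example_opts *opts)
{
	bool free;

	mutex_lock(&opts->lock);
	free = (--opts->refcnt == 0);
	mutex_unlock(&opts->lock);

	/* only the last user reaches the kfree(), never a concurrent one */
	if (free)
		kfree(opts);
}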
static void f_midi_rmidi_free(struct snd_rawmidi *rmidi)


@@ -631,19 +631,19 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
return -EAGAIN;
}
list_add(&req->list, &dev->tx_reqs_active);
/* here, we unlock, and only unlock, to avoid deadlock. */
spin_unlock(&dev->lock);
value = usb_ep_queue(dev->in_ep, req, GFP_ATOMIC);
spin_lock(&dev->lock);
if (value) {
list_del(&req->list);
list_add(&req->list, &dev->tx_reqs);
spin_unlock_irqrestore(&dev->lock, flags);
mutex_unlock(&dev->lock_printer_io);
return -EAGAIN;
}
list_add(&req->list, &dev->tx_reqs_active);
}
spin_unlock_irqrestore(&dev->lock, flags);


@@ -851,6 +851,9 @@ int rndis_msg_parser(struct rndis_params *params, u8 *buf)
*/
pr_warn("%s: unknown RNDIS message 0x%08X len %d\n",
__func__, MsgType, MsgLength);
/* Garbled message can be huge, so limit what we display */
if (MsgLength > 16)
MsgLength = 16;
print_hex_dump_bytes(__func__, DUMP_PREFIX_OFFSET,
buf, MsgLength);
break;


@@ -844,6 +844,10 @@ struct net_device *gether_setup_name_default(const char *netname)
net->ethtool_ops = &ops;
SET_NETDEV_DEVTYPE(net, &gadget_type);
/* MTU range: 14 - 15412 */
net->min_mtu = ETH_HLEN;
net->max_mtu = GETHER_MAX_ETH_FRAME_LEN;
return net;
}
EXPORT_SYMBOL_GPL(gether_setup_name_default);


@@ -179,7 +179,7 @@ config USB_R8A66597
config USB_RENESAS_USBHS_UDC
tristate 'Renesas USBHS controller'
depends on USB_RENESAS_USBHS && HAS_DMA
depends on USB_RENESAS_USBHS
help
Renesas USBHS is a discrete USB host and peripheral controller chip
that supports both full and high speed USB 2.0 data transfers.
@@ -192,7 +192,7 @@ config USB_RENESAS_USBHS_UDC
config USB_RENESAS_USB3
tristate 'Renesas USB3.0 Peripheral controller'
depends on ARCH_RENESAS || COMPILE_TEST
depends on EXTCON && HAS_DMA
depends on EXTCON
help
Renesas USB3.0 Peripheral controller is a USB peripheral controller
that supports super, high, and full speed USB 3.0 data transfers.
@@ -438,6 +438,8 @@ config USB_GADGET_XILINX
dynamically linked module called "udc-xilinx" and force all
gadget drivers to also be dynamically linked.
source "drivers/usb/gadget/udc/aspeed-vhub/Kconfig"
#
# LAST -- dummy/emulated controller
#


@@ -39,4 +39,5 @@ obj-$(CONFIG_USB_MV_U3D) += mv_u3d_core.o
obj-$(CONFIG_USB_GR_UDC) += gr_udc.o
obj-$(CONFIG_USB_GADGET_XILINX) += udc-xilinx.o
obj-$(CONFIG_USB_SNP_UDC_PLAT) += snps_udc_plat.o
obj-$(CONFIG_USB_ASPEED_VHUB) += aspeed-vhub/
obj-$(CONFIG_USB_BDC_UDC) += bdc/


@@ -0,0 +1,7 @@
# SPDX-License-Identifier: GPL-2.0+
config USB_ASPEED_VHUB
tristate "Aspeed vHub UDC driver"
depends on ARCH_ASPEED || COMPILE_TEST
help
USB peripheral controller for the Aspeed AST2500 family
SoCs supporting the "vHub" functionality and USB2.0


@@ -0,0 +1,4 @@
# SPDX-License-Identifier: GPL-2.0+
obj-$(CONFIG_USB_ASPEED_VHUB) += aspeed-vhub.o
aspeed-vhub-y := core.o ep0.o epn.o dev.o hub.o

View File

@@ -0,0 +1,425 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* aspeed-vhub -- Driver for Aspeed SoC "vHub" USB gadget
*
* core.c - Top level support
*
* Copyright 2017 IBM Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/prefetch.h>
#include <linux/clk.h>
#include <linux/usb/gadget.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/regmap.h>
#include <linux/dma-mapping.h>
#include "vhub.h"
void ast_vhub_done(struct ast_vhub_ep *ep, struct ast_vhub_req *req,
int status)
{
bool internal = req->internal;
EPVDBG(ep, "completing request @%p, status %d\n", req, status);
list_del_init(&req->queue);
if (req->req.status == -EINPROGRESS)
req->req.status = status;
if (req->req.dma) {
if (!WARN_ON(!ep->dev))
usb_gadget_unmap_request(&ep->dev->gadget,
&req->req, ep->epn.is_in);
req->req.dma = 0;
}
/*
* If this isn't an internal EP0 request, call the core
* to call the gadget completion.
*/
if (!internal) {
spin_unlock(&ep->vhub->lock);
usb_gadget_giveback_request(&ep->ep, &req->req);
spin_lock(&ep->vhub->lock);
}
}
void ast_vhub_nuke(struct ast_vhub_ep *ep, int status)
{
struct ast_vhub_req *req;
EPDBG(ep, "Nuking\n");
/* Beware, lock will be dropped & re-acquired by done() */
while (!list_empty(&ep->queue)) {
req = list_first_entry(&ep->queue, struct ast_vhub_req, queue);
ast_vhub_done(ep, req, status);
}
}
struct usb_request *ast_vhub_alloc_request(struct usb_ep *u_ep,
gfp_t gfp_flags)
{
struct ast_vhub_req *req;
req = kzalloc(sizeof(*req), gfp_flags);
if (!req)
return NULL;
return &req->req;
}
void ast_vhub_free_request(struct usb_ep *u_ep, struct usb_request *u_req)
{
struct ast_vhub_req *req = to_ast_req(u_req);
kfree(req);
}
static irqreturn_t ast_vhub_irq(int irq, void *data)
{
struct ast_vhub *vhub = data;
irqreturn_t iret = IRQ_NONE;
u32 istat;
/* Stale interrupt while tearing down */
if (!vhub->ep0_bufs)
return IRQ_NONE;
spin_lock(&vhub->lock);
/* Read and ACK interrupts */
istat = readl(vhub->regs + AST_VHUB_ISR);
if (!istat)
goto bail;
writel(istat, vhub->regs + AST_VHUB_ISR);
iret = IRQ_HANDLED;
UDCVDBG(vhub, "irq status=%08x, ep_acks=%08x ep_nacks=%08x\n",
istat,
readl(vhub->regs + AST_VHUB_EP_ACK_ISR),
readl(vhub->regs + AST_VHUB_EP_NACK_ISR));
/* Handle generic EPs first */
if (istat & VHUB_IRQ_EP_POOL_ACK_STALL) {
u32 i, ep_acks = readl(vhub->regs + AST_VHUB_EP_ACK_ISR);
writel(ep_acks, vhub->regs + AST_VHUB_EP_ACK_ISR);
for (i = 0; ep_acks && i < AST_VHUB_NUM_GEN_EPs; i++) {
u32 mask = VHUB_EP_IRQ(i);
if (ep_acks & mask) {
ast_vhub_epn_ack_irq(&vhub->epns[i]);
ep_acks &= ~mask;
}
}
}
/* Handle device interrupts */
if (istat & (VHUB_IRQ_DEVICE1 |
VHUB_IRQ_DEVICE2 |
VHUB_IRQ_DEVICE3 |
VHUB_IRQ_DEVICE4 |
VHUB_IRQ_DEVICE5)) {
if (istat & VHUB_IRQ_DEVICE1)
ast_vhub_dev_irq(&vhub->ports[0].dev);
if (istat & VHUB_IRQ_DEVICE2)
ast_vhub_dev_irq(&vhub->ports[1].dev);
if (istat & VHUB_IRQ_DEVICE3)
ast_vhub_dev_irq(&vhub->ports[2].dev);
if (istat & VHUB_IRQ_DEVICE4)
ast_vhub_dev_irq(&vhub->ports[3].dev);
if (istat & VHUB_IRQ_DEVICE5)
ast_vhub_dev_irq(&vhub->ports[4].dev);
}
/* Handle top-level vHub EP0 interrupts */
if (istat & (VHUB_IRQ_HUB_EP0_OUT_ACK_STALL |
VHUB_IRQ_HUB_EP0_IN_ACK_STALL |
VHUB_IRQ_HUB_EP0_SETUP)) {
if (istat & VHUB_IRQ_HUB_EP0_IN_ACK_STALL)
ast_vhub_ep0_handle_ack(&vhub->ep0, true);
if (istat & VHUB_IRQ_HUB_EP0_OUT_ACK_STALL)
ast_vhub_ep0_handle_ack(&vhub->ep0, false);
if (istat & VHUB_IRQ_HUB_EP0_SETUP)
ast_vhub_ep0_handle_setup(&vhub->ep0);
}
/* Various top level bus events */
if (istat & (VHUB_IRQ_BUS_RESUME |
VHUB_IRQ_BUS_SUSPEND |
VHUB_IRQ_BUS_RESET)) {
if (istat & VHUB_IRQ_BUS_RESUME)
ast_vhub_hub_resume(vhub);
if (istat & VHUB_IRQ_BUS_SUSPEND)
ast_vhub_hub_suspend(vhub);
if (istat & VHUB_IRQ_BUS_RESET)
ast_vhub_hub_reset(vhub);
}
bail:
spin_unlock(&vhub->lock);
return iret;
}
void ast_vhub_init_hw(struct ast_vhub *vhub)
{
u32 ctrl;
UDCDBG(vhub,"(Re)Starting HW ...\n");
/* Enable PHY */
ctrl = VHUB_CTRL_PHY_CLK |
VHUB_CTRL_PHY_RESET_DIS;
/*
* We do *NOT* set the VHUB_CTRL_CLK_STOP_SUSPEND bit
* to stop the logic clock during suspend because
* it causes the registers to become inaccessible and
* we haven't yet figured out a good way to bring the
* controller back to life to issue a wakeup.
*/
/*
* Set some ISO & split control bits according to Aspeed
* recommendation
*
* VHUB_CTRL_ISO_RSP_CTRL: When set tells the HW to respond
* with 0 bytes data packet to ISO IN endpoints when no data
* is available.
*
* VHUB_CTRL_SPLIT_IN: This makes a SOF complete a split IN
* transaction.
*/
ctrl |= VHUB_CTRL_ISO_RSP_CTRL | VHUB_CTRL_SPLIT_IN;
writel(ctrl, vhub->regs + AST_VHUB_CTRL);
udelay(1);
/* Set descriptor ring size */
if (AST_VHUB_DESCS_COUNT == 256) {
ctrl |= VHUB_CTRL_LONG_DESC;
writel(ctrl, vhub->regs + AST_VHUB_CTRL);
} else {
BUILD_BUG_ON(AST_VHUB_DESCS_COUNT != 32);
}
/* Reset all devices */
writel(VHUB_SW_RESET_ALL, vhub->regs + AST_VHUB_SW_RESET);
udelay(1);
writel(0, vhub->regs + AST_VHUB_SW_RESET);
/* Disable and cleanup EP ACK/NACK interrupts */
writel(0, vhub->regs + AST_VHUB_EP_ACK_IER);
writel(0, vhub->regs + AST_VHUB_EP_NACK_IER);
writel(VHUB_EP_IRQ_ALL, vhub->regs + AST_VHUB_EP_ACK_ISR);
writel(VHUB_EP_IRQ_ALL, vhub->regs + AST_VHUB_EP_NACK_ISR);
/* Default settings for EP0, enable HW hub EP1 */
writel(0, vhub->regs + AST_VHUB_EP0_CTRL);
writel(VHUB_EP1_CTRL_RESET_TOGGLE |
VHUB_EP1_CTRL_ENABLE,
vhub->regs + AST_VHUB_EP1_CTRL);
writel(0, vhub->regs + AST_VHUB_EP1_STS_CHG);
/* Configure EP0 DMA buffer */
writel(vhub->ep0.buf_dma, vhub->regs + AST_VHUB_EP0_DATA);
/* Clear address */
writel(0, vhub->regs + AST_VHUB_CONF);
/* Pullup hub (activate on host) */
if (vhub->force_usb1)
ctrl |= VHUB_CTRL_FULL_SPEED_ONLY;
ctrl |= VHUB_CTRL_UPSTREAM_CONNECT;
writel(ctrl, vhub->regs + AST_VHUB_CTRL);
/* Enable some interrupts */
writel(VHUB_IRQ_HUB_EP0_IN_ACK_STALL |
VHUB_IRQ_HUB_EP0_OUT_ACK_STALL |
VHUB_IRQ_HUB_EP0_SETUP |
VHUB_IRQ_EP_POOL_ACK_STALL |
VHUB_IRQ_BUS_RESUME |
VHUB_IRQ_BUS_SUSPEND |
VHUB_IRQ_BUS_RESET,
vhub->regs + AST_VHUB_IER);
}
static int ast_vhub_remove(struct platform_device *pdev)
{
struct ast_vhub *vhub = platform_get_drvdata(pdev);
unsigned long flags;
int i;
if (!vhub || !vhub->regs)
return 0;
/* Remove devices */
for (i = 0; i < AST_VHUB_NUM_PORTS; i++)
ast_vhub_del_dev(&vhub->ports[i].dev);
spin_lock_irqsave(&vhub->lock, flags);
/* Mask & ack all interrupts */
writel(0, vhub->regs + AST_VHUB_IER);
writel(VHUB_IRQ_ACK_ALL, vhub->regs + AST_VHUB_ISR);
/* Pull device, leave PHY enabled */
writel(VHUB_CTRL_PHY_CLK |
VHUB_CTRL_PHY_RESET_DIS,
vhub->regs + AST_VHUB_CTRL);
if (vhub->clk)
clk_disable_unprepare(vhub->clk);
spin_unlock_irqrestore(&vhub->lock, flags);
if (vhub->ep0_bufs)
dma_free_coherent(&pdev->dev,
AST_VHUB_EP0_MAX_PACKET *
(AST_VHUB_NUM_PORTS + 1),
vhub->ep0_bufs,
vhub->ep0_bufs_dma);
vhub->ep0_bufs = NULL;
return 0;
}
static int ast_vhub_probe(struct platform_device *pdev)
{
enum usb_device_speed max_speed;
struct ast_vhub *vhub;
struct resource *res;
int i, rc = 0;
vhub = devm_kzalloc(&pdev->dev, sizeof(*vhub), GFP_KERNEL);
if (!vhub)
return -ENOMEM;
spin_lock_init(&vhub->lock);
vhub->pdev = pdev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
vhub->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(vhub->regs)) {
dev_err(&pdev->dev, "Failed to map resources\n");
return PTR_ERR(vhub->regs);
}
UDCDBG(vhub, "vHub@%pR mapped @%p\n", res, vhub->regs);
platform_set_drvdata(pdev, vhub);
vhub->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(vhub->clk)) {
rc = PTR_ERR(vhub->clk);
goto err;
}
rc = clk_prepare_enable(vhub->clk);
if (rc) {
dev_err(&pdev->dev, "Error couldn't enable clock (%d)\n", rc);
goto err;
}
/* Check if we need to limit the HW to USB1 */
max_speed = usb_get_maximum_speed(&pdev->dev);
if (max_speed != USB_SPEED_UNKNOWN && max_speed < USB_SPEED_HIGH)
vhub->force_usb1 = true;
/* Mask & ack all interrupts before installing the handler */
writel(0, vhub->regs + AST_VHUB_IER);
writel(VHUB_IRQ_ACK_ALL, vhub->regs + AST_VHUB_ISR);
/* Find interrupt and install handler */
vhub->irq = platform_get_irq(pdev, 0);
if (vhub->irq < 0) {
dev_err(&pdev->dev, "Failed to get interrupt\n");
rc = vhub->irq;
goto err;
}
rc = devm_request_irq(&pdev->dev, vhub->irq, ast_vhub_irq, 0,
KBUILD_MODNAME, vhub);
if (rc) {
dev_err(&pdev->dev, "Failed to request interrupt\n");
goto err;
}
/*
* Allocate DMA buffers for all EP0s in one chunk,
* one per port and one for the vHub itself
*/
vhub->ep0_bufs = dma_alloc_coherent(&pdev->dev,
AST_VHUB_EP0_MAX_PACKET *
(AST_VHUB_NUM_PORTS + 1),
&vhub->ep0_bufs_dma, GFP_KERNEL);
if (!vhub->ep0_bufs) {
dev_err(&pdev->dev, "Failed to allocate EP0 DMA buffers\n");
rc = -ENOMEM;
goto err;
}
UDCVDBG(vhub, "EP0 DMA buffers @%p (DMA 0x%08x)\n",
vhub->ep0_bufs, (u32)vhub->ep0_bufs_dma);
/* Init vHub EP0 */
ast_vhub_init_ep0(vhub, &vhub->ep0, NULL);
/* Init devices */
for (i = 0; i < AST_VHUB_NUM_PORTS && rc == 0; i++)
rc = ast_vhub_init_dev(vhub, i);
if (rc)
goto err;
/* Init hub emulation */
ast_vhub_init_hub(vhub);
/* Initialize HW */
ast_vhub_init_hw(vhub);
dev_info(&pdev->dev, "Initialized virtual hub in USB%d mode\n",
vhub->force_usb1 ? 1 : 2);
return 0;
err:
ast_vhub_remove(pdev);
return rc;
}
static const struct of_device_id ast_vhub_dt_ids[] = {
{
.compatible = "aspeed,ast2400-usb-vhub",
},
{
.compatible = "aspeed,ast2500-usb-vhub",
},
{ }
};
MODULE_DEVICE_TABLE(of, ast_vhub_dt_ids);
static struct platform_driver ast_vhub_driver = {
.probe = ast_vhub_probe,
.remove = ast_vhub_remove,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = ast_vhub_dt_ids,
},
};
module_platform_driver(ast_vhub_driver);
MODULE_DESCRIPTION("Aspeed vHub udc driver");
MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
MODULE_LICENSE("GPL");

View File

@@ -0,0 +1,589 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* aspeed-vhub -- Driver for Aspeed SoC "vHub" USB gadget
*
* dev.c - Individual device/gadget management (ie, a port = a gadget)
*
* Copyright 2017 IBM Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/prefetch.h>
#include <linux/clk.h>
#include <linux/usb/gadget.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/regmap.h>
#include <linux/dma-mapping.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include "vhub.h"
void ast_vhub_dev_irq(struct ast_vhub_dev *d)
{
u32 istat = readl(d->regs + AST_VHUB_DEV_ISR);
writel(istat, d->regs + AST_VHUB_DEV_ISR);
if (istat & VHUV_DEV_IRQ_EP0_IN_ACK_STALL)
ast_vhub_ep0_handle_ack(&d->ep0, true);
if (istat & VHUV_DEV_IRQ_EP0_OUT_ACK_STALL)
ast_vhub_ep0_handle_ack(&d->ep0, false);
if (istat & VHUV_DEV_IRQ_EP0_SETUP)
ast_vhub_ep0_handle_setup(&d->ep0);
}
static void ast_vhub_dev_enable(struct ast_vhub_dev *d)
{
u32 reg, hmsk;
if (d->enabled)
return;
/* Enable device and its EP0 interrupts */
reg = VHUB_DEV_EN_ENABLE_PORT |
VHUB_DEV_EN_EP0_IN_ACK_IRQEN |
VHUB_DEV_EN_EP0_OUT_ACK_IRQEN |
VHUB_DEV_EN_EP0_SETUP_IRQEN;
if (d->gadget.speed == USB_SPEED_HIGH)
reg |= VHUB_DEV_EN_SPEED_SEL_HIGH;
writel(reg, d->regs + AST_VHUB_DEV_EN_CTRL);
/* Enable device interrupt in the hub as well */
hmsk = VHUB_IRQ_DEVICE1 << d->index;
reg = readl(d->vhub->regs + AST_VHUB_IER);
reg |= hmsk;
writel(reg, d->vhub->regs + AST_VHUB_IER);
/* Set EP0 DMA buffer address */
writel(d->ep0.buf_dma, d->regs + AST_VHUB_DEV_EP0_DATA);
d->enabled = true;
}
static void ast_vhub_dev_disable(struct ast_vhub_dev *d)
{
u32 reg, hmsk;
if (!d->enabled)
return;
/* Disable device interrupt in the hub */
hmsk = VHUB_IRQ_DEVICE1 << d->index;
reg = readl(d->vhub->regs + AST_VHUB_IER);
reg &= ~hmsk;
writel(reg, d->vhub->regs + AST_VHUB_IER);
/* Then disable device */
writel(0, d->regs + AST_VHUB_DEV_EN_CTRL);
d->gadget.speed = USB_SPEED_UNKNOWN;
d->enabled = false;
d->suspended = false;
}
static int ast_vhub_dev_feature(struct ast_vhub_dev *d,
u16 wIndex, u16 wValue,
bool is_set)
{
DDBG(d, "%s_FEATURE(dev val=%02x)\n",
is_set ? "SET" : "CLEAR", wValue);
if (wValue != USB_DEVICE_REMOTE_WAKEUP)
return std_req_driver;
d->wakeup_en = is_set;
return std_req_complete;
}
static int ast_vhub_ep_feature(struct ast_vhub_dev *d,
u16 wIndex, u16 wValue, bool is_set)
{
struct ast_vhub_ep *ep;
int ep_num;
ep_num = wIndex & USB_ENDPOINT_NUMBER_MASK;
DDBG(d, "%s_FEATURE(ep%d val=%02x)\n",
is_set ? "SET" : "CLEAR", ep_num, wValue);
if (ep_num == 0)
return std_req_complete;
if (ep_num >= AST_VHUB_NUM_GEN_EPs || !d->epns[ep_num - 1])
return std_req_stall;
if (wValue != USB_ENDPOINT_HALT)
return std_req_driver;
ep = d->epns[ep_num - 1];
if (WARN_ON(!ep))
return std_req_stall;
if (!ep->epn.enabled || !ep->ep.desc || ep->epn.is_iso ||
ep->epn.is_in != !!(wIndex & USB_DIR_IN))
return std_req_stall;
DDBG(d, "%s stall on EP %d\n",
is_set ? "setting" : "clearing", ep_num);
ep->epn.stalled = is_set;
ast_vhub_update_epn_stall(ep);
return std_req_complete;
}
static int ast_vhub_dev_status(struct ast_vhub_dev *d,
u16 wIndex, u16 wValue)
{
u8 st0;
DDBG(d, "GET_STATUS(dev)\n");
st0 = d->gadget.is_selfpowered << USB_DEVICE_SELF_POWERED;
if (d->wakeup_en)
st0 |= 1 << USB_DEVICE_REMOTE_WAKEUP;
return ast_vhub_simple_reply(&d->ep0, st0, 0);
}
static int ast_vhub_ep_status(struct ast_vhub_dev *d,
u16 wIndex, u16 wValue)
{
int ep_num = wIndex & USB_ENDPOINT_NUMBER_MASK;
struct ast_vhub_ep *ep;
u8 st0 = 0;
DDBG(d, "GET_STATUS(ep%d)\n", ep_num);
if (ep_num >= AST_VHUB_NUM_GEN_EPs)
return std_req_stall;
if (ep_num != 0) {
ep = d->epns[ep_num - 1];
if (!ep)
return std_req_stall;
if (!ep->epn.enabled || !ep->ep.desc || ep->epn.is_iso ||
ep->epn.is_in != !!(wIndex & USB_DIR_IN))
return std_req_stall;
if (ep->epn.stalled)
st0 |= 1 << USB_ENDPOINT_HALT;
}
return ast_vhub_simple_reply(&d->ep0, st0, 0);
}
static void ast_vhub_dev_set_address(struct ast_vhub_dev *d, u8 addr)
{
u32 reg;
DDBG(d, "SET_ADDRESS: Got address %x\n", addr);
reg = readl(d->regs + AST_VHUB_DEV_EN_CTRL);
reg &= ~VHUB_DEV_EN_ADDR_MASK;
reg |= VHUB_DEV_EN_SET_ADDR(addr);
writel(reg, d->regs + AST_VHUB_DEV_EN_CTRL);
}
int ast_vhub_std_dev_request(struct ast_vhub_ep *ep,
struct usb_ctrlrequest *crq)
{
struct ast_vhub_dev *d = ep->dev;
u16 wValue, wIndex;
/* No driver, we shouldn't be enabled ... */
if (!d->driver || !d->enabled || d->suspended) {
EPDBG(ep,
"Device is wrong state driver=%p enabled=%d"
" suspended=%d\n",
d->driver, d->enabled, d->suspended);
return std_req_stall;
}
/* First packet, grab speed */
if (d->gadget.speed == USB_SPEED_UNKNOWN) {
d->gadget.speed = ep->vhub->speed;
if (d->gadget.speed > d->driver->max_speed)
d->gadget.speed = d->driver->max_speed;
DDBG(d, "fist packet, captured speed %d\n",
d->gadget.speed);
}
wValue = le16_to_cpu(crq->wValue);
wIndex = le16_to_cpu(crq->wIndex);
switch ((crq->bRequestType << 8) | crq->bRequest) {
/* SET_ADDRESS */
case DeviceOutRequest | USB_REQ_SET_ADDRESS:
ast_vhub_dev_set_address(d, wValue);
return std_req_complete;
/* GET_STATUS */
case DeviceRequest | USB_REQ_GET_STATUS:
return ast_vhub_dev_status(d, wIndex, wValue);
case InterfaceRequest | USB_REQ_GET_STATUS:
return ast_vhub_simple_reply(ep, 0, 0);
case EndpointRequest | USB_REQ_GET_STATUS:
return ast_vhub_ep_status(d, wIndex, wValue);
/* SET/CLEAR_FEATURE */
case DeviceOutRequest | USB_REQ_SET_FEATURE:
return ast_vhub_dev_feature(d, wIndex, wValue, true);
case DeviceOutRequest | USB_REQ_CLEAR_FEATURE:
return ast_vhub_dev_feature(d, wIndex, wValue, false);
case EndpointOutRequest | USB_REQ_SET_FEATURE:
return ast_vhub_ep_feature(d, wIndex, wValue, true);
case EndpointOutRequest | USB_REQ_CLEAR_FEATURE:
return ast_vhub_ep_feature(d, wIndex, wValue, false);
}
return std_req_driver;
}
static int ast_vhub_udc_wakeup(struct usb_gadget* gadget)
{
struct ast_vhub_dev *d = to_ast_dev(gadget);
unsigned long flags;
int rc = -EINVAL;
spin_lock_irqsave(&d->vhub->lock, flags);
if (!d->wakeup_en)
goto err;
DDBG(d, "Device initiated wakeup\n");
/* Wakeup the host */
ast_vhub_hub_wake_all(d->vhub);
rc = 0;
err:
spin_unlock_irqrestore(&d->vhub->lock, flags);
return rc;
}
static int ast_vhub_udc_get_frame(struct usb_gadget* gadget)
{
struct ast_vhub_dev *d = to_ast_dev(gadget);
return (readl(d->vhub->regs + AST_VHUB_USBSTS) >> 16) & 0x7ff;
}
static void ast_vhub_dev_nuke(struct ast_vhub_dev *d)
{
unsigned int i;
for (i = 0; i < AST_VHUB_NUM_GEN_EPs; i++) {
if (!d->epns[i])
continue;
ast_vhub_nuke(d->epns[i], -ESHUTDOWN);
}
}
static int ast_vhub_udc_pullup(struct usb_gadget* gadget, int on)
{
struct ast_vhub_dev *d = to_ast_dev(gadget);
unsigned long flags;
spin_lock_irqsave(&d->vhub->lock, flags);
DDBG(d, "pullup(%d)\n", on);
/* Mark disconnected in the hub */
ast_vhub_device_connect(d->vhub, d->index, on);
/*
* If enabled, nuke all requests if any (there shouldn't be)
* and disable the port. This will clear the address too.
*/
if (d->enabled) {
ast_vhub_dev_nuke(d);
ast_vhub_dev_disable(d);
}
spin_unlock_irqrestore(&d->vhub->lock, flags);
return 0;
}
static int ast_vhub_udc_start(struct usb_gadget *gadget,
struct usb_gadget_driver *driver)
{
struct ast_vhub_dev *d = to_ast_dev(gadget);
unsigned long flags;
spin_lock_irqsave(&d->vhub->lock, flags);
DDBG(d, "start\n");
/* We don't do much more until the hub enables us */
d->driver = driver;
d->gadget.is_selfpowered = 1;
spin_unlock_irqrestore(&d->vhub->lock, flags);
return 0;
}
static struct usb_ep *ast_vhub_udc_match_ep(struct usb_gadget *gadget,
struct usb_endpoint_descriptor *desc,
struct usb_ss_ep_comp_descriptor *ss)
{
struct ast_vhub_dev *d = to_ast_dev(gadget);
struct ast_vhub_ep *ep;
struct usb_ep *u_ep;
unsigned int max, addr, i;
DDBG(d, "Match EP type %d\n", usb_endpoint_type(desc));
/*
* First we need to look for an existing unclaimed EP as another
* configuration may have already associated a bunch of EPs with
* this gadget. This duplicates the code in usb_ep_autoconfig_ss()
* unfortunately.
*/
list_for_each_entry(u_ep, &gadget->ep_list, ep_list) {
if (usb_gadget_ep_match_desc(gadget, u_ep, desc, ss)) {
DDBG(d, " -> using existing EP%d\n",
to_ast_ep(u_ep)->d_idx);
return u_ep;
}
}
/*
* We didn't find one, we need to grab one from the pool.
*
* First let's do some sanity checking
*/
switch(usb_endpoint_type(desc)) {
case USB_ENDPOINT_XFER_CONTROL:
/* Only EP0 can be a control endpoint */
return NULL;
case USB_ENDPOINT_XFER_ISOC:
/* ISO: limit 1023 bytes full speed, 1024 high/super speed */
if (gadget_is_dualspeed(gadget))
max = 1024;
else
max = 1023;
break;
case USB_ENDPOINT_XFER_BULK:
if (gadget_is_dualspeed(gadget))
max = 512;
else
max = 64;
break;
case USB_ENDPOINT_XFER_INT:
if (gadget_is_dualspeed(gadget))
max = 1024;
else
max = 64;
break;
}
if (usb_endpoint_maxp(desc) > max)
return NULL;
/*
* Find a free EP address for that device. We can't
* let the generic code assign these as it would
* create overlapping numbers for IN and OUT which
* we don't support, so also create a suitable name
* that will allow the generic code to use our
* assigned address.
*/
for (i = 0; i < AST_VHUB_NUM_GEN_EPs; i++)
if (d->epns[i] == NULL)
break;
if (i >= AST_VHUB_NUM_GEN_EPs)
return NULL;
addr = i + 1;
/*
* Now grab an EP from the shared pool and associate
* it with our device
*/
ep = ast_vhub_alloc_epn(d, addr);
if (!ep)
return NULL;
DDBG(d, "Allocated epn#%d for port EP%d\n",
ep->epn.g_idx, addr);
return &ep->ep;
}
static int ast_vhub_udc_stop(struct usb_gadget *gadget)
{
struct ast_vhub_dev *d = to_ast_dev(gadget);
unsigned long flags;
spin_lock_irqsave(&d->vhub->lock, flags);
DDBG(d, "stop\n");
d->driver = NULL;
d->gadget.speed = USB_SPEED_UNKNOWN;
ast_vhub_dev_nuke(d);
if (d->enabled)
ast_vhub_dev_disable(d);
spin_unlock_irqrestore(&d->vhub->lock, flags);
return 0;
}
static struct usb_gadget_ops ast_vhub_udc_ops = {
.get_frame = ast_vhub_udc_get_frame,
.wakeup = ast_vhub_udc_wakeup,
.pullup = ast_vhub_udc_pullup,
.udc_start = ast_vhub_udc_start,
.udc_stop = ast_vhub_udc_stop,
.match_ep = ast_vhub_udc_match_ep,
};
void ast_vhub_dev_suspend(struct ast_vhub_dev *d)
{
d->suspended = true;
if (d->driver) {
spin_unlock(&d->vhub->lock);
d->driver->suspend(&d->gadget);
spin_lock(&d->vhub->lock);
}
}
void ast_vhub_dev_resume(struct ast_vhub_dev *d)
{
d->suspended = false;
if (d->driver) {
spin_unlock(&d->vhub->lock);
d->driver->resume(&d->gadget);
spin_lock(&d->vhub->lock);
}
}
void ast_vhub_dev_reset(struct ast_vhub_dev *d)
{
/*
* If speed is not set, we enable the port. If it is,
* send reset to the gadget and reset "speed".
*
* Speed is an indication that we have got the first
* setup packet to the device.
*/
if (d->gadget.speed == USB_SPEED_UNKNOWN && !d->enabled) {
DDBG(d, "Reset at unknown speed of disabled device, enabling...\n");
ast_vhub_dev_enable(d);
d->suspended = false;
}
if (d->gadget.speed != USB_SPEED_UNKNOWN && d->driver) {
unsigned int i;
DDBG(d, "Reset at known speed of bound device, resetting...\n");
spin_unlock(&d->vhub->lock);
d->driver->reset(&d->gadget);
spin_lock(&d->vhub->lock);
/*
* Disable/re-enable HW, this will clear the address
* and speed setting.
*/
ast_vhub_dev_disable(d);
ast_vhub_dev_enable(d);
/* Clear stall on all EPs */
for (i = 0; i < AST_VHUB_NUM_GEN_EPs; i++) {
struct ast_vhub_ep *ep = d->epns[i];
if (ep && ep->epn.stalled) {
ep->epn.stalled = false;
ast_vhub_update_epn_stall(ep);
}
}
/* Additional cleanups */
d->wakeup_en = false;
d->suspended = false;
}
}
void ast_vhub_del_dev(struct ast_vhub_dev *d)
{
unsigned long flags;
spin_lock_irqsave(&d->vhub->lock, flags);
if (!d->registered) {
spin_unlock_irqrestore(&d->vhub->lock, flags);
return;
}
d->registered = false;
spin_unlock_irqrestore(&d->vhub->lock, flags);
usb_del_gadget_udc(&d->gadget);
device_unregister(d->port_dev);
}
static void ast_vhub_dev_release(struct device *dev)
{
kfree(dev);
}
int ast_vhub_init_dev(struct ast_vhub *vhub, unsigned int idx)
{
struct ast_vhub_dev *d = &vhub->ports[idx].dev;
struct device *parent = &vhub->pdev->dev;
int rc;
d->vhub = vhub;
d->index = idx;
d->name = devm_kasprintf(parent, GFP_KERNEL, "port%d", idx+1);
d->regs = vhub->regs + 0x100 + 0x10 * idx;
ast_vhub_init_ep0(vhub, &d->ep0, d);
/*
* The UDC core really needs us to have separate and uniquely
* named "parent" devices for each port so we create a sub device
* here for that purpose
*/
d->port_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
if (!d->port_dev)
return -ENOMEM;
device_initialize(d->port_dev);
d->port_dev->release = ast_vhub_dev_release;
d->port_dev->parent = parent;
dev_set_name(d->port_dev, "%s:p%d", dev_name(parent), idx + 1);
rc = device_add(d->port_dev);
if (rc)
goto fail_add;
/* Populate gadget */
INIT_LIST_HEAD(&d->gadget.ep_list);
d->gadget.ops = &ast_vhub_udc_ops;
d->gadget.ep0 = &d->ep0.ep;
d->gadget.name = KBUILD_MODNAME;
if (vhub->force_usb1)
d->gadget.max_speed = USB_SPEED_FULL;
else
d->gadget.max_speed = USB_SPEED_HIGH;
d->gadget.speed = USB_SPEED_UNKNOWN;
d->gadget.dev.of_node = vhub->pdev->dev.of_node;
rc = usb_add_gadget_udc(d->port_dev, &d->gadget);
if (rc != 0)
goto fail_udc;
d->registered = true;
return 0;
fail_udc:
device_del(d->port_dev);
fail_add:
put_device(d->port_dev);
return rc;
}

View File

@@ -0,0 +1,486 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* aspeed-vhub -- Driver for Aspeed SoC "vHub" USB gadget
*
* ep0.c - Endpoint 0 handling
*
* Copyright 2017 IBM Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/prefetch.h>
#include <linux/clk.h>
#include <linux/usb/gadget.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/regmap.h>
#include <linux/dma-mapping.h>
#include "vhub.h"
int ast_vhub_reply(struct ast_vhub_ep *ep, char *ptr, int len)
{
struct usb_request *req = &ep->ep0.req.req;
int rc;
if (WARN_ON(ep->d_idx != 0))
return std_req_stall;
if (WARN_ON(!ep->ep0.dir_in))
return std_req_stall;
if (WARN_ON(len > AST_VHUB_EP0_MAX_PACKET))
return std_req_stall;
if (WARN_ON(req->status == -EINPROGRESS))
return std_req_stall;
req->buf = ptr;
req->length = len;
req->complete = NULL;
req->zero = true;
/*
* Call internal queue directly after dropping the lock. This is
* safe to do as the reply is always the last thing done when
* processing a SETUP packet, usually as a tail call
*/
spin_unlock(&ep->vhub->lock);
if (ep->ep.ops->queue(&ep->ep, req, GFP_ATOMIC))
rc = std_req_stall;
else
rc = std_req_data;
spin_lock(&ep->vhub->lock);
return rc;
}
int __ast_vhub_simple_reply(struct ast_vhub_ep *ep, int len, ...)
{
u8 *buffer = ep->buf;
unsigned int i;
va_list args;
va_start(args, len);
/* Copy data directly into EP buffer */
for (i = 0; i < len; i++)
buffer[i] = va_arg(args, int);
va_end(args);
/* req->buf NULL means data is already there */
return ast_vhub_reply(ep, NULL, len);
}
void ast_vhub_ep0_handle_setup(struct ast_vhub_ep *ep)
{
struct usb_ctrlrequest crq;
enum std_req_rc std_req_rc;
int rc = -ENODEV;
if (WARN_ON(ep->d_idx != 0))
return;
/*
* Grab the setup packet from the chip and byteswap
* interesting fields
*/
memcpy_fromio(&crq, ep->ep0.setup, sizeof(crq));
EPDBG(ep, "SETUP packet %02x/%02x/%04x/%04x/%04x [%s] st=%d\n",
crq.bRequestType, crq.bRequest,
le16_to_cpu(crq.wValue),
le16_to_cpu(crq.wIndex),
le16_to_cpu(crq.wLength),
(crq.bRequestType & USB_DIR_IN) ? "in" : "out",
ep->ep0.state);
/* Check our state, cancel pending requests if needed */
if (ep->ep0.state != ep0_state_token) {
EPDBG(ep, "wrong state\n");
ast_vhub_nuke(ep, 0);
goto stall;
}
/* Calculate next state for EP0 */
ep->ep0.state = ep0_state_data;
ep->ep0.dir_in = !!(crq.bRequestType & USB_DIR_IN);
/* If this is the vHub, we handle requests differently */
std_req_rc = std_req_driver;
if (ep->dev == NULL) {
if ((crq.bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
std_req_rc = ast_vhub_std_hub_request(ep, &crq);
else if ((crq.bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS)
std_req_rc = ast_vhub_class_hub_request(ep, &crq);
else
std_req_rc = std_req_stall;
} else if ((crq.bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
std_req_rc = ast_vhub_std_dev_request(ep, &crq);
/* Act upon result */
switch(std_req_rc) {
case std_req_complete:
goto complete;
case std_req_stall:
goto stall;
case std_req_driver:
break;
case std_req_data:
return;
}
/* Pass request up to the gadget driver */
if (WARN_ON(!ep->dev))
goto stall;
if (ep->dev->driver) {
EPDBG(ep, "forwarding to gadget...\n");
spin_unlock(&ep->vhub->lock);
rc = ep->dev->driver->setup(&ep->dev->gadget, &crq);
spin_lock(&ep->vhub->lock);
EPDBG(ep, "driver returned %d\n", rc);
} else {
EPDBG(ep, "no gadget for request !\n");
}
if (rc >= 0)
return;
stall:
EPDBG(ep, "stalling\n");
writel(VHUB_EP0_CTRL_STALL, ep->ep0.ctlstat);
ep->ep0.state = ep0_state_status;
ep->ep0.dir_in = false;
return;
complete:
EPVDBG(ep, "sending [in] status with no data\n");
writel(VHUB_EP0_TX_BUFF_RDY, ep->ep0.ctlstat);
ep->ep0.state = ep0_state_status;
ep->ep0.dir_in = false;
}
static void ast_vhub_ep0_do_send(struct ast_vhub_ep *ep,
struct ast_vhub_req *req)
{
unsigned int chunk;
u32 reg;
/* If this is a 0-length request, it's the gadget trying to
* send a status on our behalf. We take it from here.
*/
if (req->req.length == 0)
req->last_desc = 1;
/* Are we done ? Complete request, otherwise wait for next interrupt */
if (req->last_desc >= 0) {
EPVDBG(ep, "complete send %d/%d\n",
req->req.actual, req->req.length);
ep->ep0.state = ep0_state_status;
writel(VHUB_EP0_RX_BUFF_RDY, ep->ep0.ctlstat);
ast_vhub_done(ep, req, 0);
return;
}
/*
* Next chunk cropped to max packet size. Also check if this
* is the last packet
*/
chunk = req->req.length - req->req.actual;
if (chunk > ep->ep.maxpacket)
chunk = ep->ep.maxpacket;
else if ((chunk < ep->ep.maxpacket) || !req->req.zero)
req->last_desc = 1;
EPVDBG(ep, "send chunk=%d last=%d, req->act=%d mp=%d\n",
chunk, req->last_desc, req->req.actual, ep->ep.maxpacket);
/*
* Copy data if any (internal requests already have data
* in the EP buffer)
*/
if (chunk && req->req.buf)
memcpy(ep->buf, req->req.buf + req->req.actual, chunk);
/* Remember chunk size and trigger send */
reg = VHUB_EP0_SET_TX_LEN(chunk);
writel(reg, ep->ep0.ctlstat);
writel(reg | VHUB_EP0_TX_BUFF_RDY, ep->ep0.ctlstat);
req->req.actual += chunk;
}
static void ast_vhub_ep0_rx_prime(struct ast_vhub_ep *ep)
{
EPVDBG(ep, "rx prime\n");
/* Prime endpoint for receiving data */
writel(VHUB_EP0_RX_BUFF_RDY, ep->ep0.ctlstat + AST_VHUB_EP0_CTRL);
}
static void ast_vhub_ep0_do_receive(struct ast_vhub_ep *ep, struct ast_vhub_req *req,
unsigned int len)
{
unsigned int remain;
int rc = 0;
/* We are receiving... grab request */
remain = req->req.length - req->req.actual;
EPVDBG(ep, "receive got=%d remain=%d\n", len, remain);
/* Are we getting more than asked ? */
if (len > remain) {
EPDBG(ep, "receiving too much (ovf: %d) !\n",
len - remain);
len = remain;
rc = -EOVERFLOW;
}
if (len && req->req.buf)
memcpy(req->req.buf + req->req.actual, ep->buf, len);
req->req.actual += len;
/* Done ? */
if (len < ep->ep.maxpacket || len == remain) {
ep->ep0.state = ep0_state_status;
writel(VHUB_EP0_TX_BUFF_RDY, ep->ep0.ctlstat);
ast_vhub_done(ep, req, rc);
} else
ast_vhub_ep0_rx_prime(ep);
}
void ast_vhub_ep0_handle_ack(struct ast_vhub_ep *ep, bool in_ack)
{
struct ast_vhub_req *req;
struct ast_vhub *vhub = ep->vhub;
struct device *dev = &vhub->pdev->dev;
bool stall = false;
u32 stat;
/* Read EP0 status */
stat = readl(ep->ep0.ctlstat);
/* Grab current request if any */
req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue);
EPVDBG(ep, "ACK status=%08x,state=%d is_in=%d in_ack=%d req=%p\n",
stat, ep->ep0.state, ep->ep0.dir_in, in_ack, req);
switch(ep->ep0.state) {
case ep0_state_token:
/* There should be no request queued in that state... */
if (req) {
dev_warn(dev, "request present while in TOKEN state\n");
ast_vhub_nuke(ep, -EINVAL);
}
dev_warn(dev, "ack while in TOKEN state\n");
stall = true;
break;
case ep0_state_data:
/* Check the state bits corresponding to our direction */
if ((ep->ep0.dir_in && (stat & VHUB_EP0_TX_BUFF_RDY)) ||
(!ep->ep0.dir_in && (stat & VHUB_EP0_RX_BUFF_RDY)) ||
(ep->ep0.dir_in != in_ack)) {
dev_warn(dev, "irq state mismatch");
stall = true;
break;
}
/*
* We are in data phase and there's no request, something is
* wrong, stall
*/
if (!req) {
dev_warn(dev, "data phase, no request\n");
stall = true;
break;
}
/* We have a request, handle data transfers */
if (ep->ep0.dir_in)
ast_vhub_ep0_do_send(ep, req);
else
ast_vhub_ep0_do_receive(ep, req, VHUB_EP0_RX_LEN(stat));
return;
case ep0_state_status:
/* Nuke stale requests */
if (req) {
dev_warn(dev, "request present while in STATUS state\n");
ast_vhub_nuke(ep, -EINVAL);
}
/*
* If the status phase completes with the wrong ack, stall
* the endpoint just in case, to abort whatever the host
* was doing.
*/
if (ep->ep0.dir_in == in_ack) {
dev_warn(dev, "status direction mismatch\n");
stall = true;
}
}
/* Reset to token state */
ep->ep0.state = ep0_state_token;
if (stall)
writel(VHUB_EP0_CTRL_STALL, ep->ep0.ctlstat);
}
static int ast_vhub_ep0_queue(struct usb_ep* u_ep, struct usb_request *u_req,
gfp_t gfp_flags)
{
struct ast_vhub_req *req = to_ast_req(u_req);
struct ast_vhub_ep *ep = to_ast_ep(u_ep);
struct ast_vhub *vhub = ep->vhub;
struct device *dev = &vhub->pdev->dev;
unsigned long flags;
/* Paranoid checks */
if (!u_req || (!u_req->complete && !req->internal)) {
dev_warn(dev, "Bogus EP0 request ! u_req=%p\n", u_req);
if (u_req) {
dev_warn(dev, "complete=%p internal=%d\n",
u_req->complete, req->internal);
}
return -EINVAL;
}
/* Not endpoint 0 ? */
if (WARN_ON(ep->d_idx != 0))
return -EINVAL;
/* Disabled device */
if (ep->dev && (!ep->dev->enabled || ep->dev->suspended))
return -ESHUTDOWN;
/* Data, no buffer and not internal ? */
if (u_req->length && !u_req->buf && !req->internal) {
dev_warn(dev, "Request with no buffer !\n");
return -EINVAL;
}
EPVDBG(ep, "enqueue req @%p\n", req);
EPVDBG(ep, " l=%d zero=%d noshort=%d is_in=%d\n",
u_req->length, u_req->zero,
u_req->short_not_ok, ep->ep0.dir_in);
/* Initialize request progress fields */
u_req->status = -EINPROGRESS;
u_req->actual = 0;
req->last_desc = -1;
req->active = false;
spin_lock_irqsave(&vhub->lock, flags);
/* EP0 can only support a single request at a time */
if (!list_empty(&ep->queue) || ep->ep0.state == ep0_state_token) {
dev_warn(dev, "EP0: Request in wrong state\n");
spin_unlock_irqrestore(&vhub->lock, flags);
return -EBUSY;
}
/* Add request to list and kick processing if empty */
list_add_tail(&req->queue, &ep->queue);
if (ep->ep0.dir_in) {
/* IN request, send data */
ast_vhub_ep0_do_send(ep, req);
} else if (u_req->length == 0) {
/* 0-len request, send completion as rx */
EPVDBG(ep, "0-length rx completion\n");
ep->ep0.state = ep0_state_status;
writel(VHUB_EP0_TX_BUFF_RDY, ep->ep0.ctlstat);
ast_vhub_done(ep, req, 0);
} else {
/* OUT request, start receiver */
ast_vhub_ep0_rx_prime(ep);
}
spin_unlock_irqrestore(&vhub->lock, flags);
return 0;
}
static int ast_vhub_ep0_dequeue(struct usb_ep* u_ep, struct usb_request *u_req)
{
struct ast_vhub_ep *ep = to_ast_ep(u_ep);
struct ast_vhub *vhub = ep->vhub;
struct ast_vhub_req *req;
unsigned long flags;
int rc = -EINVAL;
spin_lock_irqsave(&vhub->lock, flags);
/* Only one request can be in the queue */
req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue);
/* Is it ours ? */
if (req && u_req == &req->req) {
EPVDBG(ep, "dequeue req @%p\n", req);
/*
* We don't have to deal with "active" as all
* DMAs go to the EP buffers, not the request.
*/
ast_vhub_done(ep, req, -ECONNRESET);
/* We do stall the EP to clean things up in HW */
writel(VHUB_EP0_CTRL_STALL, ep->ep0.ctlstat);
ep->ep0.state = ep0_state_status;
ep->ep0.dir_in = false;
rc = 0;
}
spin_unlock_irqrestore(&vhub->lock, flags);
return rc;
}
static const struct usb_ep_ops ast_vhub_ep0_ops = {
.queue = ast_vhub_ep0_queue,
.dequeue = ast_vhub_ep0_dequeue,
.alloc_request = ast_vhub_alloc_request,
.free_request = ast_vhub_free_request,
};
void ast_vhub_init_ep0(struct ast_vhub *vhub, struct ast_vhub_ep *ep,
struct ast_vhub_dev *dev)
{
memset(ep, 0, sizeof(*ep));
INIT_LIST_HEAD(&ep->ep.ep_list);
INIT_LIST_HEAD(&ep->queue);
ep->ep.ops = &ast_vhub_ep0_ops;
ep->ep.name = "ep0";
ep->ep.caps.type_control = true;
usb_ep_set_maxpacket_limit(&ep->ep, AST_VHUB_EP0_MAX_PACKET);
ep->d_idx = 0;
ep->dev = dev;
ep->vhub = vhub;
ep->ep0.state = ep0_state_token;
INIT_LIST_HEAD(&ep->ep0.req.queue);
ep->ep0.req.internal = true;
/* Small difference between vHub and devices */
if (dev) {
ep->ep0.ctlstat = dev->regs + AST_VHUB_DEV_EP0_CTRL;
ep->ep0.setup = vhub->regs +
AST_VHUB_SETUP0 + 8 * (dev->index + 1);
ep->buf = vhub->ep0_bufs +
AST_VHUB_EP0_MAX_PACKET * (dev->index + 1);
ep->buf_dma = vhub->ep0_bufs_dma +
AST_VHUB_EP0_MAX_PACKET * (dev->index + 1);
} else {
ep->ep0.ctlstat = vhub->regs + AST_VHUB_EP0_CTRL;
ep->ep0.setup = vhub->regs + AST_VHUB_SETUP0;
ep->buf = vhub->ep0_bufs;
ep->buf_dma = vhub->ep0_bufs_dma;
}
}

View File

@@ -0,0 +1,843 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* aspeed-vhub -- Driver for Aspeed SoC "vHub" USB gadget
*
* epn.c - Generic endpoints management
*
* Copyright 2017 IBM Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/prefetch.h>
#include <linux/clk.h>
#include <linux/usb/gadget.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/regmap.h>
#include <linux/dma-mapping.h>
#include "vhub.h"
#define EXTRA_CHECKS
#ifdef EXTRA_CHECKS
#define CHECK(ep, expr, fmt...) \
do { \
if (!(expr)) EPDBG(ep, "CHECK:" fmt); \
} while(0)
#else
#define CHECK(ep, expr, fmt...) do { } while(0)
#endif
static void ast_vhub_epn_kick(struct ast_vhub_ep *ep, struct ast_vhub_req *req)
{
unsigned int act = req->req.actual;
unsigned int len = req->req.length;
unsigned int chunk;
/* There should be no DMA ongoing */
WARN_ON(req->active);
/* Calculate next chunk size */
chunk = len - act;
if (chunk > ep->ep.maxpacket)
chunk = ep->ep.maxpacket;
else if ((chunk < ep->ep.maxpacket) || !req->req.zero)
req->last_desc = 1;
EPVDBG(ep, "kick req %p act=%d/%d chunk=%d last=%d\n",
req, act, len, chunk, req->last_desc);
/* If DMA is unavailable, use the staging EP buffer */
if (!req->req.dma) {
/* For IN transfers, copy data over first */
if (ep->epn.is_in)
memcpy(ep->buf, req->req.buf + act, chunk);
writel(ep->buf_dma, ep->epn.regs + AST_VHUB_EP_DESC_BASE);
} else
writel(req->req.dma + act, ep->epn.regs + AST_VHUB_EP_DESC_BASE);
/* Start DMA */
req->active = true;
writel(VHUB_EP_DMA_SET_TX_SIZE(chunk),
ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
writel(VHUB_EP_DMA_SET_TX_SIZE(chunk) | VHUB_EP_DMA_SINGLE_KICK,
ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
}
static void ast_vhub_epn_handle_ack(struct ast_vhub_ep *ep)
{
struct ast_vhub_req *req;
unsigned int len;
u32 stat;
/* Read EP status */
stat = readl(ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
/* Grab current request if any */
req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue);
EPVDBG(ep, "ACK status=%08x is_in=%d, req=%p (active=%d)\n",
stat, ep->epn.is_in, req, req ? req->active : 0);
/* In the absence of a request, bail out; it must have been dequeued */
if (!req)
return;
/*
* Request not active, move on to processing queue, active request
* was probably dequeued
*/
if (!req->active)
goto next_chunk;
/* Check if HW has moved on */
if (VHUB_EP_DMA_RPTR(stat) != 0) {
EPDBG(ep, "DMA read pointer not 0 !\n");
return;
}
/* No current DMA ongoing */
req->active = false;
/* Grab length out of HW */
len = VHUB_EP_DMA_TX_SIZE(stat);
/* If not using DMA, copy data out if needed */
if (!req->req.dma && !ep->epn.is_in && len)
memcpy(req->req.buf + req->req.actual, ep->buf, len);
/* Adjust size */
req->req.actual += len;
/* Check for short packet */
if (len < ep->ep.maxpacket)
req->last_desc = 1;
/* That's it ? complete the request and pick a new one */
if (req->last_desc >= 0) {
ast_vhub_done(ep, req, 0);
req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req,
queue);
/*
* Due to lock dropping inside "done" the next request could
* already be active, so check for that and bail if needed.
*/
if (!req || req->active)
return;
}
next_chunk:
ast_vhub_epn_kick(ep, req);
}
static inline unsigned int ast_vhub_count_free_descs(struct ast_vhub_ep *ep)
{
/*
* d_next == d_last means descriptor list empty to HW,
* thus we can only have AST_VHUB_DESCS_COUNT-1 descriptors
* in the list
*/
return (ep->epn.d_last + AST_VHUB_DESCS_COUNT - ep->epn.d_next - 1) &
(AST_VHUB_DESCS_COUNT - 1);
}
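
A minimal standalone sketch (not part of the patch) of the same ring arithmetic, showing why one descriptor slot is always kept unused; the names and the assert-based harness are illustrative only, and COUNT mirrors AST_VHUB_DESCS_COUNT (assumed to be a power of two):

#include <assert.h>

#define COUNT 256	/* stands in for AST_VHUB_DESCS_COUNT */

static unsigned int ring_free(unsigned int d_last, unsigned int d_next)
{
	/* d_next == d_last reads as "empty" to the HW, so capacity is COUNT - 1 */
	return (d_last + COUNT - d_next - 1) & (COUNT - 1);
}

int main(void)
{
	assert(ring_free(10, 10) == 255);	/* empty ring: COUNT - 1 free slots */
	assert(ring_free(10, 12) == 253);	/* two descriptors currently queued */
	return 0;
}
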
static void ast_vhub_epn_kick_desc(struct ast_vhub_ep *ep,
struct ast_vhub_req *req)
{
unsigned int act = req->act_count;
unsigned int len = req->req.length;
unsigned int chunk;
/* Mark request active if not already */
req->active = true;
/* If the request was already completely written, do nothing */
if (req->last_desc >= 0)
return;
EPVDBG(ep, "kick act=%d/%d chunk_max=%d free_descs=%d\n",
act, len, ep->epn.chunk_max, ast_vhub_count_free_descs(ep));
/* While we can create descriptors */
while (ast_vhub_count_free_descs(ep) && req->last_desc < 0) {
struct ast_vhub_desc *desc;
unsigned int d_num;
/* Grab next free descriptor */
d_num = ep->epn.d_next;
desc = &ep->epn.descs[d_num];
ep->epn.d_next = (d_num + 1) & (AST_VHUB_DESCS_COUNT - 1);
/* Calculate next chunk size */
chunk = len - act;
if (chunk <= ep->epn.chunk_max) {
/*
* Is this the last packet ? Because of having up to 8
* packets in a descriptor we can't just compare "chunk"
* with ep.maxpacket. We have to see if it's a multiple
* of it to know if we have to send a zero packet.
* Sadly that involves a modulo which is a bit expensive
* but probably still better than not doing it.
*/
if (!chunk || !req->req.zero || (chunk % ep->ep.maxpacket) != 0)
req->last_desc = d_num;
} else {
chunk = ep->epn.chunk_max;
}
EPVDBG(ep, " chunk: act=%d/%d chunk=%d last=%d desc=%d free=%d\n",
act, len, chunk, req->last_desc, d_num,
ast_vhub_count_free_descs(ep));
/* Populate descriptor */
desc->w0 = cpu_to_le32(req->req.dma + act);
/* Interrupt if end of request or no more descriptors */
/*
* TODO: Be smarter about it, if we don't have enough
* descriptors request an interrupt before queue empty
* or so in order to be able to populate more before
* the HW runs out. This isn't a problem at the moment
* as we use 256 descriptors and only put at most one
* request in the ring.
*/
desc->w1 = cpu_to_le32(VHUB_DSC1_IN_SET_LEN(chunk));
if (req->last_desc >= 0 || !ast_vhub_count_free_descs(ep))
desc->w1 |= cpu_to_le32(VHUB_DSC1_IN_INTERRUPT);
/* Account packet */
req->act_count = act = act + chunk;
}
/* Tell HW about new descriptors */
writel(VHUB_EP_DMA_SET_CPU_WPTR(ep->epn.d_next),
ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
EPVDBG(ep, "HW kicked, d_next=%d dstat=%08x\n",
ep->epn.d_next, readl(ep->epn.regs + AST_VHUB_EP_DESC_STATUS));
}
static void ast_vhub_epn_handle_ack_desc(struct ast_vhub_ep *ep)
{
struct ast_vhub_req *req;
unsigned int len, d_last;
u32 stat, stat1;
/* Read EP status, workaround HW race */
do {
stat = readl(ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
stat1 = readl(ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
} while(stat != stat1);
/* Extract RPTR */
d_last = VHUB_EP_DMA_RPTR(stat);
/* Grab current request if any */
req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue);
EPVDBG(ep, "ACK status=%08x is_in=%d ep->d_last=%d..%d\n",
stat, ep->epn.is_in, ep->epn.d_last, d_last);
/* Check all completed descriptors */
while (ep->epn.d_last != d_last) {
struct ast_vhub_desc *desc;
unsigned int d_num;
bool is_last_desc;
/* Grab next completed descriptor */
d_num = ep->epn.d_last;
desc = &ep->epn.descs[d_num];
ep->epn.d_last = (d_num + 1) & (AST_VHUB_DESCS_COUNT - 1);
/* Grab len out of descriptor */
len = VHUB_DSC1_IN_LEN(le32_to_cpu(desc->w1));
EPVDBG(ep, " desc %d len=%d req=%p (act=%d)\n",
d_num, len, req, req ? req->active : 0);
/* If no active request pending, move on */
if (!req || !req->active)
continue;
/* Adjust size */
req->req.actual += len;
/* Is that the last chunk ? */
is_last_desc = req->last_desc == d_num;
CHECK(ep, is_last_desc == (len < ep->ep.maxpacket ||
(req->req.actual >= req->req.length &&
!req->req.zero)),
"Last packet discrepancy: last_desc=%d len=%d r.act=%d "
"r.len=%d r.zero=%d mp=%d\n",
is_last_desc, len, req->req.actual, req->req.length,
req->req.zero, ep->ep.maxpacket);
if (is_last_desc) {
/*
* Because we can only have one request at a time
* in our descriptor list in this implementation,
* d_last and ep->d_last should now be equal
*/
CHECK(ep, d_last == ep->epn.d_last,
"DMA read ptr mismatch %d vs %d\n",
d_last, ep->epn.d_last);
/* Note: done will drop and re-acquire the lock */
ast_vhub_done(ep, req, 0);
req = list_first_entry_or_null(&ep->queue,
struct ast_vhub_req,
queue);
break;
}
}
/* More work ? */
if (req)
ast_vhub_epn_kick_desc(ep, req);
}
void ast_vhub_epn_ack_irq(struct ast_vhub_ep *ep)
{
if (ep->epn.desc_mode)
ast_vhub_epn_handle_ack_desc(ep);
else
ast_vhub_epn_handle_ack(ep);
}
static int ast_vhub_epn_queue(struct usb_ep* u_ep, struct usb_request *u_req,
gfp_t gfp_flags)
{
struct ast_vhub_req *req = to_ast_req(u_req);
struct ast_vhub_ep *ep = to_ast_ep(u_ep);
struct ast_vhub *vhub = ep->vhub;
unsigned long flags;
bool empty;
int rc;
/* Paranoid checks */
if (!u_req || !u_req->complete || !u_req->buf) {
dev_warn(&vhub->pdev->dev, "Bogus EPn request ! u_req=%p\n", u_req);
if (u_req) {
dev_warn(&vhub->pdev->dev, "complete=%p internal=%d\n",
u_req->complete, req->internal);
}
return -EINVAL;
}
/* Endpoint enabled ? */
if (!ep->epn.enabled || !u_ep->desc || !ep->dev || !ep->d_idx ||
!ep->dev->enabled || ep->dev->suspended) {
EPDBG(ep,"Enqueing request on wrong or disabled EP\n");
return -ESHUTDOWN;
}
/* Map request for DMA if possible. For now, the rule for DMA is
* that:
*
* * For single stage mode (no descriptors):
*
* - The buffer is aligned to a 8 bytes boundary (HW requirement)
* - For a OUT endpoint, the request size is a multiple of the EP
* packet size (otherwise the controller will DMA past the end
* of the buffer if the host is sending a too long packet).
*
* * For descriptor mode (tx only for now), always.
*
* We could relax the latter by making the decision to use the bounce
* buffer based on the size of a given *segment* of the request rather
* than the whole request.
*/
if (ep->epn.desc_mode ||
((((unsigned long)u_req->buf & 7) == 0) &&
(ep->epn.is_in || !(u_req->length & (u_ep->maxpacket - 1))))) {
rc = usb_gadget_map_request(&ep->dev->gadget, u_req,
ep->epn.is_in);
if (rc) {
dev_warn(&vhub->pdev->dev,
"Request mapping failure %d\n", rc);
return rc;
}
} else
u_req->dma = 0;
EPVDBG(ep, "enqueue req @%p\n", req);
EPVDBG(ep, " l=%d dma=0x%x zero=%d noshort=%d noirq=%d is_in=%d\n",
u_req->length, (u32)u_req->dma, u_req->zero,
u_req->short_not_ok, u_req->no_interrupt,
ep->epn.is_in);
/* Initialize request progress fields */
u_req->status = -EINPROGRESS;
u_req->actual = 0;
req->act_count = 0;
req->active = false;
req->last_desc = -1;
spin_lock_irqsave(&vhub->lock, flags);
empty = list_empty(&ep->queue);
/* Add request to list and kick processing if empty */
list_add_tail(&req->queue, &ep->queue);
if (empty) {
if (ep->epn.desc_mode)
ast_vhub_epn_kick_desc(ep, req);
else
ast_vhub_epn_kick(ep, req);
}
spin_unlock_irqrestore(&vhub->lock, flags);
return 0;
}
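
Not part of the patch: the DMA eligibility rule described in the comment inside ast_vhub_epn_queue() above, restated as a minimal standalone predicate. The function name is hypothetical and, like the driver's own test, it assumes maxpacket is a power of two:

#include <stdbool.h>

/* Hypothetical restatement of the driver's "can we DMA this request?" test. */
static bool vhub_req_can_dma(bool desc_mode, bool is_in,
			     const void *buf, unsigned int len,
			     unsigned int maxpacket)
{
	if (desc_mode)			/* descriptor (IN/tx) mode always uses DMA */
		return true;
	if ((unsigned long)buf & 7)	/* HW requires 8-byte aligned buffers */
		return false;
	/* OUT transfers must additionally be a multiple of the packet size */
	return is_in || (len & (maxpacket - 1)) == 0;
}
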
static void ast_vhub_stop_active_req(struct ast_vhub_ep *ep,
bool restart_ep)
{
u32 state, reg, loops;
/* Stop DMA activity */
writel(0, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
/* Wait for it to complete */
for (loops = 0; loops < 1000; loops++) {
state = readl(ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
state = VHUB_EP_DMA_PROC_STATUS(state);
if (state == EP_DMA_PROC_RX_IDLE ||
state == EP_DMA_PROC_TX_IDLE)
break;
udelay(1);
}
if (loops >= 1000)
dev_warn(&ep->vhub->pdev->dev, "Timeout waiting for DMA\n");
/* If we don't have to restart the endpoint, that's it */
if (!restart_ep)
return;
/* Restart the endpoint */
if (ep->epn.desc_mode) {
/*
* Take out descriptors by resetting the DMA read
* pointer to be equal to the CPU write pointer.
*
* Note: If we ever support creating descriptors for
* requests that aren't the head of the queue, we
* may have to do something more complex here,
* especially if the request being taken out is
* not the current head descriptor.
*/
reg = VHUB_EP_DMA_SET_RPTR(ep->epn.d_next) |
VHUB_EP_DMA_SET_CPU_WPTR(ep->epn.d_next);
writel(reg, ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
/* Then turn it back on */
writel(ep->epn.dma_conf,
ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
} else {
/* Single mode: just turn it back on */
writel(ep->epn.dma_conf,
ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
}
}
static int ast_vhub_epn_dequeue(struct usb_ep* u_ep, struct usb_request *u_req)
{
struct ast_vhub_ep *ep = to_ast_ep(u_ep);
struct ast_vhub *vhub = ep->vhub;
struct ast_vhub_req *req;
unsigned long flags;
int rc = -EINVAL;
spin_lock_irqsave(&vhub->lock, flags);
/* Make sure it's actually queued on this endpoint */
list_for_each_entry(req, &ep->queue, queue) {
if (&req->req == u_req)
break;
}
if (&req->req == u_req) {
EPVDBG(ep, "dequeue req @%p active=%d\n",
req, req->active);
if (req->active)
ast_vhub_stop_active_req(ep, true);
ast_vhub_done(ep, req, -ECONNRESET);
rc = 0;
}
spin_unlock_irqrestore(&vhub->lock, flags);
return rc;
}
void ast_vhub_update_epn_stall(struct ast_vhub_ep *ep)
{
u32 reg;
if (WARN_ON(ep->d_idx == 0))
return;
reg = readl(ep->epn.regs + AST_VHUB_EP_CONFIG);
if (ep->epn.stalled || ep->epn.wedged)
reg |= VHUB_EP_CFG_STALL_CTRL;
else
reg &= ~VHUB_EP_CFG_STALL_CTRL;
writel(reg, ep->epn.regs + AST_VHUB_EP_CONFIG);
if (!ep->epn.stalled && !ep->epn.wedged)
writel(VHUB_EP_TOGGLE_SET_EPNUM(ep->epn.g_idx),
ep->vhub->regs + AST_VHUB_EP_TOGGLE);
}
static int ast_vhub_set_halt_and_wedge(struct usb_ep* u_ep, bool halt,
bool wedge)
{
struct ast_vhub_ep *ep = to_ast_ep(u_ep);
struct ast_vhub *vhub = ep->vhub;
unsigned long flags;
EPDBG(ep, "Set halt (%d) & wedge (%d)\n", halt, wedge);
if (!u_ep || !u_ep->desc)
return -EINVAL;
if (ep->d_idx == 0)
return 0;
if (ep->epn.is_iso)
return -EOPNOTSUPP;
spin_lock_irqsave(&vhub->lock, flags);
/* Fail with still-busy IN endpoints */
if (halt && ep->epn.is_in && !list_empty(&ep->queue)) {
spin_unlock_irqrestore(&vhub->lock, flags);
return -EAGAIN;
}
ep->epn.stalled = halt;
ep->epn.wedged = wedge;
ast_vhub_update_epn_stall(ep);
spin_unlock_irqrestore(&vhub->lock, flags);
return 0;
}
static int ast_vhub_epn_set_halt(struct usb_ep *u_ep, int value)
{
return ast_vhub_set_halt_and_wedge(u_ep, value != 0, false);
}
static int ast_vhub_epn_set_wedge(struct usb_ep *u_ep)
{
return ast_vhub_set_halt_and_wedge(u_ep, true, true);
}
static int ast_vhub_epn_disable(struct usb_ep* u_ep)
{
struct ast_vhub_ep *ep = to_ast_ep(u_ep);
struct ast_vhub *vhub = ep->vhub;
unsigned long flags;
u32 imask, ep_ier;
EPDBG(ep, "Disabling !\n");
spin_lock_irqsave(&vhub->lock, flags);
ep->epn.enabled = false;
/* Stop active DMA if any */
ast_vhub_stop_active_req(ep, false);
/* Disable endpoint */
writel(0, ep->epn.regs + AST_VHUB_EP_CONFIG);
/* Disable ACK interrupt */
imask = VHUB_EP_IRQ(ep->epn.g_idx);
ep_ier = readl(vhub->regs + AST_VHUB_EP_ACK_IER);
ep_ier &= ~imask;
writel(ep_ier, vhub->regs + AST_VHUB_EP_ACK_IER);
writel(imask, vhub->regs + AST_VHUB_EP_ACK_ISR);
/* Nuke all pending requests */
ast_vhub_nuke(ep, -ESHUTDOWN);
/* No more descriptor associated with request */
ep->ep.desc = NULL;
spin_unlock_irqrestore(&vhub->lock, flags);
return 0;
}
static int ast_vhub_epn_enable(struct usb_ep* u_ep,
const struct usb_endpoint_descriptor *desc)
{
static const char *ep_type_string[] __maybe_unused = { "ctrl",
"isoc",
"bulk",
"intr" };
struct ast_vhub_ep *ep = to_ast_ep(u_ep);
struct ast_vhub_dev *dev;
struct ast_vhub *vhub;
u16 maxpacket, type;
unsigned long flags;
u32 ep_conf, ep_ier, imask;
/* Check arguments */
if (!u_ep || !desc)
return -EINVAL;
maxpacket = usb_endpoint_maxp(desc);
if (!ep->d_idx || !ep->dev ||
desc->bDescriptorType != USB_DT_ENDPOINT ||
maxpacket == 0 || maxpacket > ep->ep.maxpacket) {
EPDBG(ep, "Invalid EP enable,d_idx=%d,dev=%p,type=%d,mp=%d/%d\n",
ep->d_idx, ep->dev, desc->bDescriptorType,
maxpacket, ep->ep.maxpacket);
return -EINVAL;
}
if (ep->d_idx != usb_endpoint_num(desc)) {
EPDBG(ep, "EP number mismatch !\n");
return -EINVAL;
}
if (ep->epn.enabled) {
EPDBG(ep, "Already enabled\n");
return -EBUSY;
}
dev = ep->dev;
vhub = ep->vhub;
/* Check device state */
if (!dev->driver) {
EPDBG(ep, "Bogus device state: driver=%p speed=%d\n",
dev->driver, dev->gadget.speed);
return -ESHUTDOWN;
}
/* Grab some info from the descriptor */
ep->epn.is_in = usb_endpoint_dir_in(desc);
ep->ep.maxpacket = maxpacket;
type = usb_endpoint_type(desc);
ep->epn.d_next = ep->epn.d_last = 0;
ep->epn.is_iso = false;
ep->epn.stalled = false;
ep->epn.wedged = false;
EPDBG(ep, "Enabling [%s] %s num %d maxpacket=%d\n",
ep->epn.is_in ? "in" : "out", ep_type_string[type],
usb_endpoint_num(desc), maxpacket);
/* Can we use DMA descriptor mode ? */
ep->epn.desc_mode = ep->epn.descs && ep->epn.is_in;
if (ep->epn.desc_mode)
memset(ep->epn.descs, 0, 8 * AST_VHUB_DESCS_COUNT);
/*
* Large send function can send up to 8 packets from
* one descriptor with a limit of 4095 bytes.
*/
ep->epn.chunk_max = ep->ep.maxpacket;
if (ep->epn.is_in) {
ep->epn.chunk_max <<= 3;
while (ep->epn.chunk_max > 4095)
ep->epn.chunk_max -= ep->ep.maxpacket;
}
switch(type) {
case USB_ENDPOINT_XFER_CONTROL:
EPDBG(ep, "Only one control endpoint\n");
return -EINVAL;
case USB_ENDPOINT_XFER_INT:
ep_conf = VHUB_EP_CFG_SET_TYPE(EP_TYPE_INT);
break;
case USB_ENDPOINT_XFER_BULK:
ep_conf = VHUB_EP_CFG_SET_TYPE(EP_TYPE_BULK);
break;
case USB_ENDPOINT_XFER_ISOC:
ep_conf = VHUB_EP_CFG_SET_TYPE(EP_TYPE_ISO);
ep->epn.is_iso = true;
break;
default:
return -EINVAL;
}
/* Encode the rest of the EP config register */
if (maxpacket < 1024)
ep_conf |= VHUB_EP_CFG_SET_MAX_PKT(maxpacket);
if (!ep->epn.is_in)
ep_conf |= VHUB_EP_CFG_DIR_OUT;
ep_conf |= VHUB_EP_CFG_SET_EP_NUM(usb_endpoint_num(desc));
ep_conf |= VHUB_EP_CFG_ENABLE;
ep_conf |= VHUB_EP_CFG_SET_DEV(dev->index + 1);
EPVDBG(ep, "config=%08x\n", ep_conf);
spin_lock_irqsave(&vhub->lock, flags);
/* Disable HW and reset DMA */
writel(0, ep->epn.regs + AST_VHUB_EP_CONFIG);
writel(VHUB_EP_DMA_CTRL_RESET,
ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
/* Configure and enable */
writel(ep_conf, ep->epn.regs + AST_VHUB_EP_CONFIG);
if (ep->epn.desc_mode) {
/* Clear DMA status, including the DMA read ptr */
writel(0, ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
/* Set descriptor base */
writel(ep->epn.descs_dma,
ep->epn.regs + AST_VHUB_EP_DESC_BASE);
/* Set base DMA config value */
ep->epn.dma_conf = VHUB_EP_DMA_DESC_MODE;
if (ep->epn.is_in)
ep->epn.dma_conf |= VHUB_EP_DMA_IN_LONG_MODE;
/* First reset and disable all operations */
writel(ep->epn.dma_conf | VHUB_EP_DMA_CTRL_RESET,
ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
/* Enable descriptor mode */
writel(ep->epn.dma_conf,
ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
} else {
/* Set base DMA config value */
ep->epn.dma_conf = VHUB_EP_DMA_SINGLE_STAGE;
/* Reset and switch to single stage mode */
writel(ep->epn.dma_conf | VHUB_EP_DMA_CTRL_RESET,
ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
writel(ep->epn.dma_conf,
ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
writel(0, ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
}
/* Cleanup data toggle just in case */
writel(VHUB_EP_TOGGLE_SET_EPNUM(ep->epn.g_idx),
vhub->regs + AST_VHUB_EP_TOGGLE);
/* Cleanup and enable ACK interrupt */
imask = VHUB_EP_IRQ(ep->epn.g_idx);
writel(imask, vhub->regs + AST_VHUB_EP_ACK_ISR);
ep_ier = readl(vhub->regs + AST_VHUB_EP_ACK_IER);
ep_ier |= imask;
writel(ep_ier, vhub->regs + AST_VHUB_EP_ACK_IER);
/* Woot, we are online ! */
ep->epn.enabled = true;
spin_unlock_irqrestore(&vhub->lock, flags);
return 0;
}
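
As a concrete illustration of the chunk_max trimming in the enable path above (not part of the patch; the helper name is hypothetical): for a high-speed bulk IN endpoint with maxpacket = 512, eight packets would be 4096 bytes, which exceeds the 4095-byte descriptor limit, so the loop trims it to 3584 bytes (7 packets):

#include <stdbool.h>

/* Hypothetical standalone version of the chunk_max computation above. */
static unsigned int vhub_chunk_max(unsigned int maxpacket, bool is_in)
{
	unsigned int chunk_max = maxpacket;

	if (is_in) {
		chunk_max <<= 3;		/* up to 8 packets per descriptor... */
		while (chunk_max > 4095)	/* ...but at most 4095 bytes */
			chunk_max -= maxpacket;
	}
	return chunk_max;	/* e.g. vhub_chunk_max(512, true) == 3584 */
}
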
static void ast_vhub_epn_dispose(struct usb_ep *u_ep)
{
struct ast_vhub_ep *ep = to_ast_ep(u_ep);
if (WARN_ON(!ep->dev || !ep->d_idx))
return;
EPDBG(ep, "Releasing endpoint\n");
/* Take it out of the EP list */
list_del_init(&ep->ep.ep_list);
/* Mark the address free in the device */
ep->dev->epns[ep->d_idx - 1] = NULL;
/* Free name & DMA buffers */
kfree(ep->ep.name);
ep->ep.name = NULL;
dma_free_coherent(&ep->vhub->pdev->dev,
AST_VHUB_EPn_MAX_PACKET +
8 * AST_VHUB_DESCS_COUNT,
ep->buf, ep->buf_dma);
ep->buf = NULL;
ep->epn.descs = NULL;
/* Mark free */
ep->dev = NULL;
}
static const struct usb_ep_ops ast_vhub_epn_ops = {
.enable = ast_vhub_epn_enable,
.disable = ast_vhub_epn_disable,
.dispose = ast_vhub_epn_dispose,
.queue = ast_vhub_epn_queue,
.dequeue = ast_vhub_epn_dequeue,
.set_halt = ast_vhub_epn_set_halt,
.set_wedge = ast_vhub_epn_set_wedge,
.alloc_request = ast_vhub_alloc_request,
.free_request = ast_vhub_free_request,
};
struct ast_vhub_ep *ast_vhub_alloc_epn(struct ast_vhub_dev *d, u8 addr)
{
struct ast_vhub *vhub = d->vhub;
struct ast_vhub_ep *ep;
unsigned long flags;
int i;
/* Find a free one (no device) */
spin_lock_irqsave(&vhub->lock, flags);
for (i = 0; i < AST_VHUB_NUM_GEN_EPs; i++)
if (vhub->epns[i].dev == NULL)
break;
if (i >= AST_VHUB_NUM_GEN_EPs) {
spin_unlock_irqrestore(&vhub->lock, flags);
return NULL;
}
/* Set it up */
ep = &vhub->epns[i];
ep->dev = d;
spin_unlock_irqrestore(&vhub->lock, flags);
DDBG(d, "Allocating gen EP %d for addr %d\n", i, addr);
INIT_LIST_HEAD(&ep->queue);
ep->d_idx = addr;
ep->vhub = vhub;
ep->ep.ops = &ast_vhub_epn_ops;
ep->ep.name = kasprintf(GFP_KERNEL, "ep%d", addr);
d->epns[addr-1] = ep;
ep->epn.g_idx = i;
ep->epn.regs = vhub->regs + 0x200 + (i * 0x10);
ep->buf = dma_alloc_coherent(&vhub->pdev->dev,
AST_VHUB_EPn_MAX_PACKET +
8 * AST_VHUB_DESCS_COUNT,
&ep->buf_dma, GFP_KERNEL);
if (!ep->buf) {
kfree(ep->ep.name);
ep->ep.name = NULL;
return NULL;
}
ep->epn.descs = ep->buf + AST_VHUB_EPn_MAX_PACKET;
ep->epn.descs_dma = ep->buf_dma + AST_VHUB_EPn_MAX_PACKET;
usb_ep_set_maxpacket_limit(&ep->ep, AST_VHUB_EPn_MAX_PACKET);
list_add_tail(&ep->ep.ep_list, &d->gadget.ep_list);
ep->ep.caps.type_iso = true;
ep->ep.caps.type_bulk = true;
ep->ep.caps.type_int = true;
ep->ep.caps.dir_in = true;
ep->ep.caps.dir_out = true;
return ep;
}

View File

@@ -0,0 +1,829 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* aspeed-vhub -- Driver for Aspeed SoC "vHub" USB gadget
*
* hub.c - virtual hub handling
*
* Copyright 2017 IBM Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/prefetch.h>
#include <linux/clk.h>
#include <linux/usb/gadget.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/regmap.h>
#include <linux/dma-mapping.h>
#include <linux/bcd.h>
#include <linux/version.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include "vhub.h"
/* USB 2.0 hub device descriptor
 *
 * A few things we may want to improve here:
 *
 *  - We may need to indicate TT support
 *  - We may need a device qualifier descriptor
 *    as devices can pretend to be USB 1.1 or 2.0
 *  - Make the VID/DID overridable
 *  - Make it look like USB 1.1 if USB 1.1 mode is forced
*/
#define KERNEL_REL bin2bcd(((LINUX_VERSION_CODE >> 16) & 0x0ff))
#define KERNEL_VER bin2bcd(((LINUX_VERSION_CODE >> 8) & 0x0ff))
enum {
AST_VHUB_STR_MANUF = 3,
AST_VHUB_STR_PRODUCT = 2,
AST_VHUB_STR_SERIAL = 1,
};
static const struct usb_device_descriptor ast_vhub_dev_desc = {
.bLength = USB_DT_DEVICE_SIZE,
.bDescriptorType = USB_DT_DEVICE,
.bcdUSB = cpu_to_le16(0x0200),
.bDeviceClass = USB_CLASS_HUB,
.bDeviceSubClass = 0,
.bDeviceProtocol = 1,
.bMaxPacketSize0 = 64,
.idVendor = cpu_to_le16(0x1d6b),
.idProduct = cpu_to_le16(0x0107),
.bcdDevice = cpu_to_le16(0x0100),
.iManufacturer = AST_VHUB_STR_MANUF,
.iProduct = AST_VHUB_STR_PRODUCT,
.iSerialNumber = AST_VHUB_STR_SERIAL,
.bNumConfigurations = 1,
};
/* Patches to the above when forcing USB1 mode */
static void ast_vhub_patch_dev_desc_usb1(struct usb_device_descriptor *desc)
{
desc->bcdUSB = cpu_to_le16(0x0100);
desc->bDeviceProtocol = 0;
}
/*
* Configuration descriptor: same comments as above
* regarding handling USB1 mode.
*/
/*
 * We don't use sizeof() here because the Linux definition of
 * struct usb_endpoint_descriptor carries 2 extra bytes (the
 * audio-only bRefresh and bSynchAddress fields)
*/
#define AST_VHUB_CONF_DESC_SIZE (USB_DT_CONFIG_SIZE + \
USB_DT_INTERFACE_SIZE + \
USB_DT_ENDPOINT_SIZE)
static const struct ast_vhub_full_cdesc {
struct usb_config_descriptor cfg;
struct usb_interface_descriptor intf;
struct usb_endpoint_descriptor ep;
} __attribute__ ((packed)) ast_vhub_conf_desc = {
.cfg = {
.bLength = USB_DT_CONFIG_SIZE,
.bDescriptorType = USB_DT_CONFIG,
.wTotalLength = cpu_to_le16(AST_VHUB_CONF_DESC_SIZE),
.bNumInterfaces = 1,
.bConfigurationValue = 1,
.iConfiguration = 0,
.bmAttributes = USB_CONFIG_ATT_ONE |
USB_CONFIG_ATT_SELFPOWER |
USB_CONFIG_ATT_WAKEUP,
.bMaxPower = 0,
},
.intf = {
.bLength = USB_DT_INTERFACE_SIZE,
.bDescriptorType = USB_DT_INTERFACE,
.bInterfaceNumber = 0,
.bAlternateSetting = 0,
.bNumEndpoints = 1,
.bInterfaceClass = USB_CLASS_HUB,
.bInterfaceSubClass = 0,
.bInterfaceProtocol = 0,
.iInterface = 0,
},
.ep = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = 0x81,
.bmAttributes = USB_ENDPOINT_XFER_INT,
.wMaxPacketSize = cpu_to_le16(1),
.bInterval = 0x0c,
},
};
#define AST_VHUB_HUB_DESC_SIZE (USB_DT_HUB_NONVAR_SIZE + 2)
static const struct usb_hub_descriptor ast_vhub_hub_desc = {
.bDescLength = AST_VHUB_HUB_DESC_SIZE,
.bDescriptorType = USB_DT_HUB,
.bNbrPorts = AST_VHUB_NUM_PORTS,
.wHubCharacteristics = cpu_to_le16(HUB_CHAR_NO_LPSM),
.bPwrOn2PwrGood = 10,
.bHubContrCurrent = 0,
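/*
 * The 2-byte variable tail: a DeviceRemovable bitmap (0 = all ports
 * removable, bit 0 reserved) followed by the legacy PortPwrCtrlMask,
 * which must read as all ones.
 */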
.u.hs.DeviceRemovable[0] = 0,
.u.hs.DeviceRemovable[1] = 0xff,
};
/*
 * These strings, once converted to UTF-16, must be smaller than
 * our EP0 buffer.
*/
static const struct usb_string ast_vhub_str_array[] = {
{
.id = AST_VHUB_STR_SERIAL,
.s = "00000000"
},
{
.id = AST_VHUB_STR_PRODUCT,
.s = "USB Virtual Hub"
},
{
.id = AST_VHUB_STR_MANUF,
.s = "Aspeed"
},
{ }
};
static const struct usb_gadget_strings ast_vhub_strings = {
.language = 0x0409,
.strings = (struct usb_string *)ast_vhub_str_array
};
static int ast_vhub_hub_dev_status(struct ast_vhub_ep *ep,
u16 wIndex, u16 wValue)
{
u8 st0;
EPDBG(ep, "GET_STATUS(dev)\n");
/*
 * Mark it as self-powered; it is unlikely that the BMC is
 * powered off the USB bus ...
*/
st0 = 1 << USB_DEVICE_SELF_POWERED;
/*
* Need to double check how remote wakeup actually works
* on that chip and what triggers it.
*/
if (ep->vhub->wakeup_en)
st0 |= 1 << USB_DEVICE_REMOTE_WAKEUP;
return ast_vhub_simple_reply(ep, st0, 0);
}
static int ast_vhub_hub_ep_status(struct ast_vhub_ep *ep,
u16 wIndex, u16 wValue)
{
int ep_num;
u8 st0 = 0;
ep_num = wIndex & USB_ENDPOINT_NUMBER_MASK;
EPDBG(ep, "GET_STATUS(ep%d)\n", ep_num);
/* On the hub we have only EP 0 and 1 */
if (ep_num == 1) {
if (ep->vhub->ep1_stalled)
st0 |= 1 << USB_ENDPOINT_HALT;
} else if (ep_num != 0)
return std_req_stall;
return ast_vhub_simple_reply(ep, st0, 0);
}
static int ast_vhub_hub_dev_feature(struct ast_vhub_ep *ep,
u16 wIndex, u16 wValue,
bool is_set)
{
EPDBG(ep, "%s_FEATURE(dev val=%02x)\n",
is_set ? "SET" : "CLEAR", wValue);
if (wValue != USB_DEVICE_REMOTE_WAKEUP)
return std_req_stall;
ep->vhub->wakeup_en = is_set;
EPDBG(ep, "Hub remote wakeup %s\n",
is_set ? "enabled" : "disabled");
return std_req_complete;
}
static int ast_vhub_hub_ep_feature(struct ast_vhub_ep *ep,
u16 wIndex, u16 wValue,
bool is_set)
{
int ep_num;
u32 reg;
ep_num = wIndex & USB_ENDPOINT_NUMBER_MASK;
EPDBG(ep, "%s_FEATURE(ep%d val=%02x)\n",
is_set ? "SET" : "CLEAR", ep_num, wValue);
if (ep_num > 1)
return std_req_stall;
if (wValue != USB_ENDPOINT_HALT)
return std_req_stall;
if (ep_num == 0)
return std_req_complete;
EPDBG(ep, "%s stall on EP 1\n",
is_set ? "setting" : "clearing");
ep->vhub->ep1_stalled = is_set;
reg = readl(ep->vhub->regs + AST_VHUB_EP1_CTRL);
if (is_set) {
reg |= VHUB_EP1_CTRL_STALL;
} else {
reg &= ~VHUB_EP1_CTRL_STALL;
reg |= VHUB_EP1_CTRL_RESET_TOGGLE;
}
writel(reg, ep->vhub->regs + AST_VHUB_EP1_CTRL);
return std_req_complete;
}
static int ast_vhub_rep_desc(struct ast_vhub_ep *ep,
u8 desc_type, u16 len)
{
size_t dsize;
EPDBG(ep, "GET_DESCRIPTOR(type:%d)\n", desc_type);
/*
 * Copy to the EP buffer first and send from there, so we can do
 * some in-place patching if needed. We know the EP buffer is big
 * enough, but ensure that doesn't change. We do that now rather
 * than later (after we have checked sizes etc.) to avoid a gcc
 * bug where it thinks len is constant and complains about read
 * overflows in memcpy.
*/
switch(desc_type) {
case USB_DT_DEVICE:
dsize = USB_DT_DEVICE_SIZE;
memcpy(ep->buf, &ast_vhub_dev_desc, dsize);
BUILD_BUG_ON(dsize > sizeof(ast_vhub_dev_desc));
BUILD_BUG_ON(USB_DT_DEVICE_SIZE >= AST_VHUB_EP0_MAX_PACKET);
break;
case USB_DT_CONFIG:
dsize = AST_VHUB_CONF_DESC_SIZE;
memcpy(ep->buf, &ast_vhub_conf_desc, dsize);
BUILD_BUG_ON(dsize > sizeof(ast_vhub_conf_desc));
BUILD_BUG_ON(AST_VHUB_CONF_DESC_SIZE >= AST_VHUB_EP0_MAX_PACKET);
break;
case USB_DT_HUB:
dsize = AST_VHUB_HUB_DESC_SIZE;
memcpy(ep->buf, &ast_vhub_hub_desc, dsize);
BUILD_BUG_ON(dsize > sizeof(ast_vhub_hub_desc));
BUILD_BUG_ON(AST_VHUB_HUB_DESC_SIZE >= AST_VHUB_EP0_MAX_PACKET);
break;
default:
return std_req_stall;
}
/* Crop requested length */
if (len > dsize)
len = dsize;
/* Patch it if forcing USB1 */
if (desc_type == USB_DT_DEVICE && ep->vhub->force_usb1)
ast_vhub_patch_dev_desc_usb1(ep->buf);
/* Shoot it from the EP buffer */
return ast_vhub_reply(ep, NULL, len);
}
static int ast_vhub_rep_string(struct ast_vhub_ep *ep,
u8 string_id, u16 lang_id,
u16 len)
{
int rc = usb_gadget_get_string (&ast_vhub_strings, string_id, ep->buf);
/*
 * This should never happen unless the strings in the array above
 * are too big for the EP0 buffer
*/
BUG_ON(rc >= AST_VHUB_EP0_MAX_PACKET);
if (rc < 0)
return std_req_stall;
/* Shoot it from the EP buffer */
return ast_vhub_reply(ep, NULL, min_t(u16, rc, len));
}
enum std_req_rc ast_vhub_std_hub_request(struct ast_vhub_ep *ep,
struct usb_ctrlrequest *crq)
{
struct ast_vhub *vhub = ep->vhub;
u16 wValue, wIndex, wLength;
wValue = le16_to_cpu(crq->wValue);
wIndex = le16_to_cpu(crq->wIndex);
wLength = le16_to_cpu(crq->wLength);
/* First packet, grab speed */
if (vhub->speed == USB_SPEED_UNKNOWN) {
u32 ustat = readl(vhub->regs + AST_VHUB_USBSTS);
if (ustat & VHUB_USBSTS_HISPEED)
vhub->speed = USB_SPEED_HIGH;
else
vhub->speed = USB_SPEED_FULL;
UDCDBG(vhub, "USB status=%08x speed=%s\n", ustat,
vhub->speed == USB_SPEED_HIGH ? "high" : "full");
}
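/*
 * DeviceOutRequest, DeviceRequest, EndpointRequest & co. (from
 * <linux/usb/hcd.h>) are the bmRequestType value shifted left by 8,
 * so OR-ing in bRequest gives a single switch key per request.
 */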
switch ((crq->bRequestType << 8) | crq->bRequest) {
/* SET_ADDRESS */
case DeviceOutRequest | USB_REQ_SET_ADDRESS:
EPDBG(ep, "SET_ADDRESS: Got address %x\n", wValue);
writel(wValue, vhub->regs + AST_VHUB_CONF);
return std_req_complete;
/* GET_STATUS */
case DeviceRequest | USB_REQ_GET_STATUS:
return ast_vhub_hub_dev_status(ep, wIndex, wValue);
case InterfaceRequest | USB_REQ_GET_STATUS:
return ast_vhub_simple_reply(ep, 0, 0);
case EndpointRequest | USB_REQ_GET_STATUS:
return ast_vhub_hub_ep_status(ep, wIndex, wValue);
/* SET/CLEAR_FEATURE */
case DeviceOutRequest | USB_REQ_SET_FEATURE:
return ast_vhub_hub_dev_feature(ep, wIndex, wValue, true);
case DeviceOutRequest | USB_REQ_CLEAR_FEATURE:
return ast_vhub_hub_dev_feature(ep, wIndex, wValue, false);
case EndpointOutRequest | USB_REQ_SET_FEATURE:
return ast_vhub_hub_ep_feature(ep, wIndex, wValue, true);
case EndpointOutRequest | USB_REQ_CLEAR_FEATURE:
return ast_vhub_hub_ep_feature(ep, wIndex, wValue, false);
/* GET/SET_CONFIGURATION */
case DeviceRequest | USB_REQ_GET_CONFIGURATION:
return ast_vhub_simple_reply(ep, 1);
case DeviceOutRequest | USB_REQ_SET_CONFIGURATION:
if (wValue != 1)
return std_req_stall;
return std_req_complete;
/* GET_DESCRIPTOR */
case DeviceRequest | USB_REQ_GET_DESCRIPTOR:
switch (wValue >> 8) {
case USB_DT_DEVICE:
case USB_DT_CONFIG:
return ast_vhub_rep_desc(ep, wValue >> 8,
wLength);
case USB_DT_STRING:
return ast_vhub_rep_string(ep, wValue & 0xff,
wIndex, wLength);
}
return std_req_stall;
/* GET/SET_INTERFACE */
case DeviceRequest | USB_REQ_GET_INTERFACE:
return ast_vhub_simple_reply(ep, 0);
case DeviceOutRequest | USB_REQ_SET_INTERFACE:
if (wValue != 0 || wIndex != 0)
return std_req_stall;
return std_req_complete;
}
return std_req_stall;
}
static void ast_vhub_update_hub_ep1(struct ast_vhub *vhub,
unsigned int port)
{
/* Update HW EP1 response */
u32 reg = readl(vhub->regs + AST_VHUB_EP1_STS_CHG);
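/*
 * Bit 0 of the status change bitmap covers the hub itself, so
 * (zero-based) port N maps to bit N + 1, mirroring the USB hub
 * interrupt bitmap layout.
 */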
u32 pmask = (1 << (port + 1));
if (vhub->ports[port].change)
reg |= pmask;
else
reg &= ~pmask;
writel(reg, vhub->regs + AST_VHUB_EP1_STS_CHG);
}
static void ast_vhub_change_port_stat(struct ast_vhub *vhub,
unsigned int port,
u16 clr_flags,
u16 set_flags,
bool set_c)
{
struct ast_vhub_port *p = &vhub->ports[port];
u16 prev;
/* Update port status */
prev = p->status;
p->status = (prev & ~clr_flags) | set_flags;
DDBG(&p->dev, "port %d status %04x -> %04x (C=%d)\n",
port + 1, prev, p->status, set_c);
/* Update change bits if needed */
if (set_c) {
u16 chg = p->status ^ prev;
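/*
 * The USB_PORT_STAT_C_* change bits occupy the same bit positions
 * (0..5) as their wPortStatus counterparts, so the XOR of old and
 * new status can be masked with them directly.
 */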
/* Only these are relevant for change */
chg &= USB_PORT_STAT_C_CONNECTION |
USB_PORT_STAT_C_ENABLE |
USB_PORT_STAT_C_SUSPEND |
USB_PORT_STAT_C_OVERCURRENT |
USB_PORT_STAT_C_RESET |
USB_PORT_STAT_C_L1;
p->change |= chg;
ast_vhub_update_hub_ep1(vhub, port);
}
}
static void ast_vhub_send_host_wakeup(struct ast_vhub *vhub)
{
u32 reg = readl(vhub->regs + AST_VHUB_CTRL);
UDCDBG(vhub, "Waking up host !\n");
reg |= VHUB_CTRL_MANUAL_REMOTE_WAKEUP;
writel(reg, vhub->regs + AST_VHUB_CTRL);
}
void ast_vhub_device_connect(struct ast_vhub *vhub,
unsigned int port, bool on)
{
if (on)
ast_vhub_change_port_stat(vhub, port, 0,
USB_PORT_STAT_CONNECTION, true);
else
ast_vhub_change_port_stat(vhub, port,
USB_PORT_STAT_CONNECTION |
USB_PORT_STAT_ENABLE,
0, true);
/*
 * If the hub is set to wake up the host on connection events
* then send a wakeup.
*/
if (vhub->wakeup_en)
ast_vhub_send_host_wakeup(vhub);
}
static void ast_vhub_wake_work(struct work_struct *work)
{
struct ast_vhub *vhub = container_of(work,
struct ast_vhub,
wake_work);
unsigned long flags;
unsigned int i;
/*
* Wake all sleeping ports. If a port is suspended by
* the host suspend (without explicit state suspend),
* we let the normal host wake path deal with it later.
*/
spin_lock_irqsave(&vhub->lock, flags);
for (i = 0; i < AST_VHUB_NUM_PORTS; i++) {
struct ast_vhub_port *p = &vhub->ports[i];
if (!(p->status & USB_PORT_STAT_SUSPEND))
continue;
ast_vhub_change_port_stat(vhub, i,
USB_PORT_STAT_SUSPEND,
0, true);
ast_vhub_dev_resume(&p->dev);
}
ast_vhub_send_host_wakeup(vhub);
spin_unlock_irqrestore(&vhub->lock, flags);
}
void ast_vhub_hub_wake_all(struct ast_vhub *vhub)
{
/*
 * A device is trying to wake the world. Because this can
 * recurse back into the device, we break the call chain
 * using a work queue
*/
schedule_work(&vhub->wake_work);
}
static void ast_vhub_port_reset(struct ast_vhub *vhub, u8 port)
{
struct ast_vhub_port *p = &vhub->ports[port];
u16 set, clr, speed;
/* First mark disabled */
ast_vhub_change_port_stat(vhub, port,
USB_PORT_STAT_ENABLE |
USB_PORT_STAT_SUSPEND,
USB_PORT_STAT_RESET,
false);
if (!p->dev.driver)
return;
/*
* This will either "start" the port or reset the
* device if already started...
*/
ast_vhub_dev_reset(&p->dev);
/* Grab the right speed */
speed = p->dev.driver->max_speed;
if (speed == USB_SPEED_UNKNOWN || speed > vhub->speed)
speed = vhub->speed;
switch (speed) {
case USB_SPEED_LOW:
set = USB_PORT_STAT_LOW_SPEED;
clr = USB_PORT_STAT_HIGH_SPEED;
break;
case USB_SPEED_FULL:
set = 0;
clr = USB_PORT_STAT_LOW_SPEED |
USB_PORT_STAT_HIGH_SPEED;
break;
case USB_SPEED_HIGH:
set = USB_PORT_STAT_HIGH_SPEED;
clr = USB_PORT_STAT_LOW_SPEED;
break;
default:
UDCDBG(vhub, "Unsupported speed %d when"
" connecting device\n",
speed);
return;
}
clr |= USB_PORT_STAT_RESET;
set |= USB_PORT_STAT_ENABLE;
/* This should ideally be delayed ... */
ast_vhub_change_port_stat(vhub, port, clr, set, true);
}
static enum std_req_rc ast_vhub_set_port_feature(struct ast_vhub_ep *ep,
u8 port, u16 feat)
{
struct ast_vhub *vhub = ep->vhub;
struct ast_vhub_port *p;
if (port == 0 || port > AST_VHUB_NUM_PORTS)
return std_req_stall;
port--;
p = &vhub->ports[port];
switch(feat) {
case USB_PORT_FEAT_SUSPEND:
if (!(p->status & USB_PORT_STAT_ENABLE))
return std_req_complete;
ast_vhub_change_port_stat(vhub, port,
0, USB_PORT_STAT_SUSPEND,
false);
ast_vhub_dev_suspend(&p->dev);
return std_req_complete;
case USB_PORT_FEAT_RESET:
EPDBG(ep, "Port reset !\n");
ast_vhub_port_reset(vhub, port);
return std_req_complete;
case USB_PORT_FEAT_POWER:
/*
 * On power-on, mark the connection flag as changed if a
 * device is connected; some hosts will otherwise fail to
 * detect it.
*/
if (p->status & USB_PORT_STAT_CONNECTION) {
p->change |= USB_PORT_STAT_C_CONNECTION;
ast_vhub_update_hub_ep1(vhub, port);
}
return std_req_complete;
case USB_PORT_FEAT_TEST:
case USB_PORT_FEAT_INDICATOR:
/* We don't do anything with these */
return std_req_complete;
}
return std_req_stall;
}
static enum std_req_rc ast_vhub_clr_port_feature(struct ast_vhub_ep *ep,
u8 port, u16 feat)
{
struct ast_vhub *vhub = ep->vhub;
struct ast_vhub_port *p;
if (port == 0 || port > AST_VHUB_NUM_PORTS)
return std_req_stall;
port--;
p = &vhub->ports[port];
switch(feat) {
case USB_PORT_FEAT_ENABLE:
ast_vhub_change_port_stat(vhub, port,
USB_PORT_STAT_ENABLE |
USB_PORT_STAT_SUSPEND, 0,
false);
ast_vhub_dev_suspend(&p->dev);
return std_req_complete;
case USB_PORT_FEAT_SUSPEND:
if (!(p->status & USB_PORT_STAT_SUSPEND))
return std_req_complete;
ast_vhub_change_port_stat(vhub, port,
USB_PORT_STAT_SUSPEND, 0,
false);
ast_vhub_dev_resume(&p->dev);
return std_req_complete;
case USB_PORT_FEAT_POWER:
/* We don't do power control */
return std_req_complete;
case USB_PORT_FEAT_INDICATOR:
/* We don't have indicators */
return std_req_complete;
case USB_PORT_FEAT_C_CONNECTION:
case USB_PORT_FEAT_C_ENABLE:
case USB_PORT_FEAT_C_SUSPEND:
case USB_PORT_FEAT_C_OVER_CURRENT:
case USB_PORT_FEAT_C_RESET:
/* Clear state-change feature */
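/*
 * Feature selectors C_CONNECTION..C_RESET are 16..20 and map onto
 * bits 0..4 of wPortChange, hence the "- 16" below.
 */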
p->change &= ~(1u << (feat - 16));
ast_vhub_update_hub_ep1(vhub, port);
return std_req_complete;
}
return std_req_stall;
}
static enum std_req_rc ast_vhub_get_port_stat(struct ast_vhub_ep *ep,
u8 port)
{
struct ast_vhub *vhub = ep->vhub;
u16 stat, chg;
if (port == 0 || port > AST_VHUB_NUM_PORTS)
return std_req_stall;
port--;
stat = vhub->ports[port].status;
chg = vhub->ports[port].change;
/* We always have power */
stat |= USB_PORT_STAT_POWER;
EPDBG(ep, " port status=%04x change=%04x\n", stat, chg);
return ast_vhub_simple_reply(ep,
stat & 0xff,
stat >> 8,
chg & 0xff,
chg >> 8);
}
enum std_req_rc ast_vhub_class_hub_request(struct ast_vhub_ep *ep,
struct usb_ctrlrequest *crq)
{
u16 wValue, wIndex, wLength;
wValue = le16_to_cpu(crq->wValue);
wIndex = le16_to_cpu(crq->wIndex);
wLength = le16_to_cpu(crq->wLength);
switch ((crq->bRequestType << 8) | crq->bRequest) {
case GetHubStatus:
EPDBG(ep, "GetHubStatus\n");
return ast_vhub_simple_reply(ep, 0, 0, 0, 0);
case GetPortStatus:
EPDBG(ep, "GetPortStatus(%d)\n", wIndex & 0xff);
return ast_vhub_get_port_stat(ep, wIndex & 0xf);
case GetHubDescriptor:
if (wValue != (USB_DT_HUB << 8))
return std_req_stall;
EPDBG(ep, "GetHubDescriptor(%d)\n", wIndex & 0xff);
return ast_vhub_rep_desc(ep, USB_DT_HUB, wLength);
case SetHubFeature:
case ClearHubFeature:
EPDBG(ep, "Get/SetHubFeature(%d)\n", wValue);
/* No feature to act on, just complete the request */
if (wValue == C_HUB_LOCAL_POWER ||
wValue == C_HUB_OVER_CURRENT)
return std_req_complete;
return std_req_stall;
case SetPortFeature:
EPDBG(ep, "SetPortFeature(%d,%d)\n", wIndex & 0xf, wValue);
return ast_vhub_set_port_feature(ep, wIndex & 0xf, wValue);
case ClearPortFeature:
EPDBG(ep, "ClearPortFeature(%d,%d)\n", wIndex & 0xf, wValue);
return ast_vhub_clr_port_feature(ep, wIndex & 0xf, wValue);
default:
EPDBG(ep, "Unknown class request\n");
}
return std_req_stall;
}
void ast_vhub_hub_suspend(struct ast_vhub *vhub)
{
unsigned int i;
UDCDBG(vhub, "USB bus suspend\n");
if (vhub->suspended)
return;
vhub->suspended = true;
/*
* Forward to unsuspended ports without changing
* their connection status.
*/
for (i = 0; i < AST_VHUB_NUM_PORTS; i++) {
struct ast_vhub_port *p = &vhub->ports[i];
if (!(p->status & USB_PORT_STAT_SUSPEND))
ast_vhub_dev_suspend(&p->dev);
}
}
void ast_vhub_hub_resume(struct ast_vhub *vhub)
{
unsigned int i;
UDCDBG(vhub, "USB bus resume\n");
if (!vhub->suspended)
return;
vhub->suspended = false;
/*
* Forward to unsuspended ports without changing
* their connection status.
*/
for (i = 0; i < AST_VHUB_NUM_PORTS; i++) {
struct ast_vhub_port *p = &vhub->ports[i];
if (!(p->status & USB_PORT_STAT_SUSPEND))
ast_vhub_dev_resume(&p->dev);
}
}
void ast_vhub_hub_reset(struct ast_vhub *vhub)
{
unsigned int i;
UDCDBG(vhub, "USB bus reset\n");
/*
 * Is the speed known? If not, we don't care: we aren't
 * initialized yet and the ports haven't been enabled.
*/
if (vhub->speed == USB_SPEED_UNKNOWN)
return;
/* We aren't suspended anymore obviously */
vhub->suspended = false;
/* No speed set */
vhub->speed = USB_SPEED_UNKNOWN;
/* Wakeup not enabled anymore */
vhub->wakeup_en = false;
/*
* Clear all port status, disable gadgets and "suspend"
* them. They will be woken up by a port reset.
*/
for (i = 0; i < AST_VHUB_NUM_PORTS; i++) {
struct ast_vhub_port *p = &vhub->ports[i];
/* Only keep the connected flag */
p->status &= USB_PORT_STAT_CONNECTION;
p->change = 0;
/* Suspend the gadget if any */
ast_vhub_dev_suspend(&p->dev);
}
/* Cleanup HW */
writel(0, vhub->regs + AST_VHUB_CONF);
writel(0, vhub->regs + AST_VHUB_EP0_CTRL);
writel(VHUB_EP1_CTRL_RESET_TOGGLE |
VHUB_EP1_CTRL_ENABLE,
vhub->regs + AST_VHUB_EP1_CTRL);
writel(0, vhub->regs + AST_VHUB_EP1_STS_CHG);
}
void ast_vhub_init_hub(struct ast_vhub *vhub)
{
vhub->speed = USB_SPEED_UNKNOWN;
INIT_WORK(&vhub->wake_work, ast_vhub_wake_work);
}

View File

@@ -0,0 +1,514 @@
/* SPDX-License-Identifier: GPL-2.0+ */
#ifndef __ASPEED_VHUB_H
#define __ASPEED_VHUB_H
/*****************************
* *
* VHUB register definitions *
* *
*****************************/
#define AST_VHUB_CTRL 0x00 /* Root Function Control & Status Register */
#define AST_VHUB_CONF 0x04 /* Root Configuration Setting Register */
#define AST_VHUB_IER 0x08 /* Interrupt Ctrl Register */
#define AST_VHUB_ISR 0x0C /* Interrupt Status Register */
#define AST_VHUB_EP_ACK_IER 0x10 /* Programmable Endpoint Pool ACK Interrupt Enable Register */
#define AST_VHUB_EP_NACK_IER 0x14 /* Programmable Endpoint Pool NACK Interrupt Enable Register */
#define AST_VHUB_EP_ACK_ISR 0x18 /* Programmable Endpoint Pool ACK Interrupt Status Register */
#define AST_VHUB_EP_NACK_ISR 0x1C /* Programmable Endpoint Pool NACK Interrupt Status Register */
#define AST_VHUB_SW_RESET 0x20 /* Device Controller Soft Reset Enable Register */
#define AST_VHUB_USBSTS 0x24 /* USB Status Register */
#define AST_VHUB_EP_TOGGLE 0x28 /* Programmable Endpoint Pool Data Toggle Value Set */
#define AST_VHUB_ISO_FAIL_ACC 0x2C /* Isochronous Transaction Fail Accumulator */
#define AST_VHUB_EP0_CTRL 0x30 /* Endpoint 0 Control/Status Register */
#define AST_VHUB_EP0_DATA 0x34 /* Base Address of Endpoint 0 IN/OUT Data Buffer Register */
#define AST_VHUB_EP1_CTRL 0x38 /* Endpoint 1 Control/Status Register */
#define AST_VHUB_EP1_STS_CHG 0x3C /* Endpoint 1 Status Change Bitmap Data */
#define AST_VHUB_SETUP0 0x80 /* Root Device Setup Data Buffer0 */
#define AST_VHUB_SETUP1 0x84 /* Root Device Setup Data Buffer1 */
/* Main control reg */
#define VHUB_CTRL_PHY_CLK (1 << 31)
#define VHUB_CTRL_PHY_LOOP_TEST (1 << 25)
#define VHUB_CTRL_DN_PWN (1 << 24)
#define VHUB_CTRL_DP_PWN (1 << 23)
#define VHUB_CTRL_LONG_DESC (1 << 18)
#define VHUB_CTRL_ISO_RSP_CTRL (1 << 17)
#define VHUB_CTRL_SPLIT_IN (1 << 16)
#define VHUB_CTRL_LOOP_T_RESULT (1 << 15)
#define VHUB_CTRL_LOOP_T_STS (1 << 14)
#define VHUB_CTRL_PHY_BIST_RESULT (1 << 13)
#define VHUB_CTRL_PHY_BIST_CTRL (1 << 12)
#define VHUB_CTRL_PHY_RESET_DIS (1 << 11)
#define VHUB_CTRL_SET_TEST_MODE(x) ((x) << 8)
#define VHUB_CTRL_MANUAL_REMOTE_WAKEUP (1 << 4)
#define VHUB_CTRL_AUTO_REMOTE_WAKEUP (1 << 3)
#define VHUB_CTRL_CLK_STOP_SUSPEND (1 << 2)
#define VHUB_CTRL_FULL_SPEED_ONLY (1 << 1)
#define VHUB_CTRL_UPSTREAM_CONNECT (1 << 0)
/* IER & ISR */
#define VHUB_IRQ_USB_CMD_DEADLOCK (1 << 18)
#define VHUB_IRQ_EP_POOL_NAK (1 << 17)
#define VHUB_IRQ_EP_POOL_ACK_STALL (1 << 16)
#define VHUB_IRQ_DEVICE5 (1 << 13)
#define VHUB_IRQ_DEVICE4 (1 << 12)
#define VHUB_IRQ_DEVICE3 (1 << 11)
#define VHUB_IRQ_DEVICE2 (1 << 10)
#define VHUB_IRQ_DEVICE1 (1 << 9)
#define VHUB_IRQ_BUS_RESUME (1 << 8)
#define VHUB_IRQ_BUS_SUSPEND (1 << 7)
#define VHUB_IRQ_BUS_RESET (1 << 6)
#define VHUB_IRQ_HUB_EP1_IN_DATA_ACK (1 << 5)
#define VHUB_IRQ_HUB_EP0_IN_DATA_NAK (1 << 4)
#define VHUB_IRQ_HUB_EP0_IN_ACK_STALL (1 << 3)
#define VHUB_IRQ_HUB_EP0_OUT_NAK (1 << 2)
#define VHUB_IRQ_HUB_EP0_OUT_ACK_STALL (1 << 1)
#define VHUB_IRQ_HUB_EP0_SETUP (1 << 0)
#define VHUB_IRQ_ACK_ALL 0x1ff
/* SW reset reg */
#define VHUB_SW_RESET_EP_POOL (1 << 9)
#define VHUB_SW_RESET_DMA_CONTROLLER (1 << 8)
#define VHUB_SW_RESET_DEVICE5 (1 << 5)
#define VHUB_SW_RESET_DEVICE4 (1 << 4)
#define VHUB_SW_RESET_DEVICE3 (1 << 3)
#define VHUB_SW_RESET_DEVICE2 (1 << 2)
#define VHUB_SW_RESET_DEVICE1 (1 << 1)
#define VHUB_SW_RESET_ROOT_HUB (1 << 0)
#define VHUB_SW_RESET_ALL (VHUB_SW_RESET_EP_POOL | \
VHUB_SW_RESET_DMA_CONTROLLER | \
VHUB_SW_RESET_DEVICE5 | \
VHUB_SW_RESET_DEVICE4 | \
VHUB_SW_RESET_DEVICE3 | \
VHUB_SW_RESET_DEVICE2 | \
VHUB_SW_RESET_DEVICE1 | \
VHUB_SW_RESET_ROOT_HUB)
/* EP ACK/NACK IRQ masks */
#define VHUB_EP_IRQ(n) (1 << (n))
#define VHUB_EP_IRQ_ALL 0x7fff /* 15 EPs */
/* USB status reg */
#define VHUB_USBSTS_HISPEED (1 << 27)
/* EP toggle */
#define VHUB_EP_TOGGLE_VALUE (1 << 8)
#define VHUB_EP_TOGGLE_SET_EPNUM(x) ((x) & 0x1f)
/* HUB EP0 control */
#define VHUB_EP0_CTRL_STALL (1 << 0)
#define VHUB_EP0_TX_BUFF_RDY (1 << 1)
#define VHUB_EP0_RX_BUFF_RDY (1 << 2)
#define VHUB_EP0_RX_LEN(x) (((x) >> 16) & 0x7f)
#define VHUB_EP0_SET_TX_LEN(x) (((x) & 0x7f) << 8)
/* HUB EP1 control */
#define VHUB_EP1_CTRL_RESET_TOGGLE (1 << 2)
#define VHUB_EP1_CTRL_STALL (1 << 1)
#define VHUB_EP1_CTRL_ENABLE (1 << 0)
/***********************************
* *
* per-device register definitions *
* *
***********************************/
#define AST_VHUB_DEV_EN_CTRL 0x00
#define AST_VHUB_DEV_ISR 0x04
#define AST_VHUB_DEV_EP0_CTRL 0x08
#define AST_VHUB_DEV_EP0_DATA 0x0c
/* Device enable control */
#define VHUB_DEV_EN_SET_ADDR(x) ((x) << 8)
#define VHUB_DEV_EN_ADDR_MASK ((0xff) << 8)
#define VHUB_DEV_EN_EP0_NAK_IRQEN (1 << 6)
#define VHUB_DEV_EN_EP0_IN_ACK_IRQEN (1 << 5)
#define VHUB_DEV_EN_EP0_OUT_NAK_IRQEN (1 << 4)
#define VHUB_DEV_EN_EP0_OUT_ACK_IRQEN (1 << 3)
#define VHUB_DEV_EN_EP0_SETUP_IRQEN (1 << 2)
#define VHUB_DEV_EN_SPEED_SEL_HIGH (1 << 1)
#define VHUB_DEV_EN_ENABLE_PORT (1 << 0)
/* Interrupt status */
#define VHUV_DEV_IRQ_EP0_IN_DATA_NACK (1 << 4)
#define VHUV_DEV_IRQ_EP0_IN_ACK_STALL (1 << 3)
#define VHUV_DEV_IRQ_EP0_OUT_DATA_NACK (1 << 2)
#define VHUV_DEV_IRQ_EP0_OUT_ACK_STALL (1 << 1)
#define VHUV_DEV_IRQ_EP0_SETUP (1 << 0)
/* Control bits.
*
* Note: The driver relies on the bulk of those bits
* matching corresponding vHub EP0 control bits
*/
#define VHUB_DEV_EP0_CTRL_STALL VHUB_EP0_CTRL_STALL
#define VHUB_DEV_EP0_TX_BUFF_RDY VHUB_EP0_TX_BUFF_RDY
#define VHUB_DEV_EP0_RX_BUFF_RDY VHUB_EP0_RX_BUFF_RDY
#define VHUB_DEV_EP0_RX_LEN(x) VHUB_EP0_RX_LEN(x)
#define VHUB_DEV_EP0_SET_TX_LEN(x) VHUB_EP0_SET_TX_LEN(x)
/*************************************
* *
* per-endpoint register definitions *
* *
*************************************/
#define AST_VHUB_EP_CONFIG 0x00
#define AST_VHUB_EP_DMA_CTLSTAT 0x04
#define AST_VHUB_EP_DESC_BASE 0x08
#define AST_VHUB_EP_DESC_STATUS 0x0C
/* EP config reg */
#define VHUB_EP_CFG_SET_MAX_PKT(x) (((x) & 0x3ff) << 16)
#define VHUB_EP_CFG_AUTO_DATA_DISABLE (1 << 13)
#define VHUB_EP_CFG_STALL_CTRL (1 << 12)
#define VHUB_EP_CFG_SET_EP_NUM(x) (((x) & 0xf) << 8)
#define VHUB_EP_CFG_SET_TYPE(x) ((x) << 5)
#define EP_TYPE_OFF 0
#define EP_TYPE_BULK 1
#define EP_TYPE_INT 2
#define EP_TYPE_ISO 3
#define VHUB_EP_CFG_DIR_OUT (1 << 4)
#define VHUB_EP_CFG_SET_DEV(x) ((x) << 1)
#define VHUB_EP_CFG_ENABLE (1 << 0)
/* EP DMA control */
#define VHUB_EP_DMA_PROC_STATUS(x) (((x) >> 4) & 0xf)
#define EP_DMA_PROC_RX_IDLE 0
#define EP_DMA_PROC_TX_IDLE 8
#define VHUB_EP_DMA_IN_LONG_MODE (1 << 3)
#define VHUB_EP_DMA_OUT_CONTIG_MODE (1 << 3)
#define VHUB_EP_DMA_CTRL_RESET (1 << 2)
#define VHUB_EP_DMA_SINGLE_STAGE (1 << 1)
#define VHUB_EP_DMA_DESC_MODE (1 << 0)
/* EP DMA status */
#define VHUB_EP_DMA_SET_TX_SIZE(x) ((x) << 16)
#define VHUB_EP_DMA_TX_SIZE(x) (((x) >> 16) & 0x7ff)
#define VHUB_EP_DMA_RPTR(x) (((x) >> 8) & 0xff)
#define VHUB_EP_DMA_SET_RPTR(x) (((x) & 0xff) << 8)
#define VHUB_EP_DMA_SET_CPU_WPTR(x) (x)
#define VHUB_EP_DMA_SINGLE_KICK (1 << 0) /* WPTR = 1 for single mode */
/*******************************
* *
* DMA descriptors definitions *
* *
*******************************/
/* Desc W1 IN */
#define VHUB_DSC1_IN_INTERRUPT (1 << 31)
#define VHUB_DSC1_IN_SPID_DATA0 (0 << 14)
#define VHUB_DSC1_IN_SPID_DATA2 (1 << 14)
#define VHUB_DSC1_IN_SPID_DATA1 (2 << 14)
#define VHUB_DSC1_IN_SPID_MDATA (3 << 14)
#define VHUB_DSC1_IN_SET_LEN(x) ((x) & 0xfff)
#define VHUB_DSC1_IN_LEN(x) ((x) & 0xfff)
/****************************************
* *
* Data structures and misc definitions *
* *
****************************************/
#define AST_VHUB_NUM_GEN_EPs 15 /* Generic non-0 EPs */
#define AST_VHUB_NUM_PORTS 5 /* vHub ports */
#define AST_VHUB_EP0_MAX_PACKET 64 /* EP0's max packet size */
#define AST_VHUB_EPn_MAX_PACKET 1024 /* Generic EPs max packet size */
#define AST_VHUB_DESCS_COUNT 256 /* Use 256 descriptor mode (valid
* values are 256 and 32)
*/
struct ast_vhub;
struct ast_vhub_dev;
/*
* DMA descriptor (generic EPs only, currently only used
 * for IN endpoints)
*/
struct ast_vhub_desc {
__le32 w0;
__le32 w1;
};
/* A transfer request, either core-originated or internal */
struct ast_vhub_req {
struct usb_request req;
struct list_head queue;
/* Actual count written to descriptors (desc mode only) */
unsigned int act_count;
/*
* Desc number of the final packet or -1. For non-desc
* mode (or ep0), any >= 0 value means "last packet"
*/
int last_desc;
/* Request active (pending DMAs) */
bool active : 1;
/* Internal request (don't call back core) */
bool internal : 1;
};
#define to_ast_req(__ureq) container_of(__ureq, struct ast_vhub_req, req)
/* Current state of an EP0 */
enum ep0_state {
ep0_state_token,
ep0_state_data,
ep0_state_status,
};
/*
 * An endpoint: either a generic gadget EP, a gadget device EP0,
 * or the internal-use vHub EP0. vHub EP1 doesn't have an
 * associated structure as it is mostly HW-managed.
*/
struct ast_vhub_ep {
struct usb_ep ep;
/* Request queue */
struct list_head queue;
/* EP index in the device, 0 means this is an EP0 */
unsigned int d_idx;
/* Dev pointer or NULL for vHub EP0 */
struct ast_vhub_dev *dev;
/* vHub itself */
struct ast_vhub *vhub;
/*
* DMA buffer for EP0, fallback DMA buffer for misaligned
* OUT transfers for generic EPs
*/
void *buf;
dma_addr_t buf_dma;
/* The rest depends on the EP type */
union {
/* EP0 (either device or vhub) */
struct {
/*
* EP0 registers are "similar" for
* vHub and devices but located in
* different places.
*/
void __iomem *ctlstat;
void __iomem *setup;
/* Current state & direction */
enum ep0_state state;
bool dir_in;
/* Internal use request */
struct ast_vhub_req req;
} ep0;
/* Generic endpoint (aka EPn) */
struct {
/* Registers */
void __iomem *regs;
/* Index in global pool (0..14) */
unsigned int g_idx;
/* DMA Descriptors */
struct ast_vhub_desc *descs;
dma_addr_t descs_dma;
unsigned int d_next;
unsigned int d_last;
unsigned int dma_conf;
/* Max chunk size for IN EPs */
unsigned int chunk_max;
/* State flags */
bool is_in : 1;
bool is_iso : 1;
bool stalled : 1;
bool wedged : 1;
bool enabled : 1;
bool desc_mode : 1;
} epn;
};
};
#define to_ast_ep(__uep) container_of(__uep, struct ast_vhub_ep, ep)
/* A device attached to a vHub port */
struct ast_vhub_dev {
struct ast_vhub *vhub;
void __iomem *regs;
/* Device index (0...4) and name string */
unsigned int index;
const char *name;
/* sysfs enclosure for the gadget gunk */
struct device *port_dev;
/* Link to gadget core */
struct usb_gadget gadget;
struct usb_gadget_driver *driver;
bool registered : 1;
bool wakeup_en : 1;
bool suspended : 1;
bool enabled : 1;
/* Endpoint structures */
struct ast_vhub_ep ep0;
struct ast_vhub_ep *epns[AST_VHUB_NUM_GEN_EPs];
};
#define to_ast_dev(__g) container_of(__g, struct ast_vhub_dev, gadget)
/* Per vhub port state info structure */
struct ast_vhub_port {
/* Port status & status change registers */
u16 status;
u16 change;
/* Associated device slot */
struct ast_vhub_dev dev;
};
/* Global vhub structure */
struct ast_vhub {
struct platform_device *pdev;
void __iomem *regs;
int irq;
spinlock_t lock;
struct work_struct wake_work;
struct clk *clk;
/* EP0 DMA buffers allocated in one chunk */
void *ep0_bufs;
dma_addr_t ep0_bufs_dma;
/* EP0 of the vhub itself */
struct ast_vhub_ep ep0;
/* State of vhub ep1 */
bool ep1_stalled : 1;
/* Per-port info */
struct ast_vhub_port ports[AST_VHUB_NUM_PORTS];
/* Generic EP data structures */
struct ast_vhub_ep epns[AST_VHUB_NUM_GEN_EPs];
/* Is the upstream bus suspended? */
bool suspended : 1;
/* Hub itself can signal remote wakeup */
bool wakeup_en : 1;
/* Force full speed only */
bool force_usb1 : 1;
/* Upstream bus speed captured at bus reset */
unsigned int speed;
};
/* Standard request handlers result codes */
enum std_req_rc {
std_req_stall = -1, /* Stall requested */
std_req_complete = 0, /* Request completed with no data */
std_req_data = 1, /* Request completed with data */
std_req_driver = 2, /* Pass on to the gadget driver */
};
#ifdef CONFIG_USB_GADGET_VERBOSE
#define UDCVDBG(u, fmt...) dev_dbg(&(u)->pdev->dev, fmt)
#define EPVDBG(ep, fmt, ...) do { \
dev_dbg(&(ep)->vhub->pdev->dev, \
"%s:EP%d " fmt, \
(ep)->dev ? (ep)->dev->name : "hub", \
(ep)->d_idx, ##__VA_ARGS__); \
} while(0)
#define DVDBG(d, fmt, ...) do { \
dev_dbg(&(d)->vhub->pdev->dev, \
"%s " fmt, (d)->name, \
##__VA_ARGS__); \
} while(0)
#else
#define UDCVDBG(u, fmt...) do { } while(0)
#define EPVDBG(ep, fmt, ...) do { } while(0)
#define DVDBG(d, fmt, ...) do { } while(0)
#endif
#ifdef CONFIG_USB_GADGET_DEBUG
#define UDCDBG(u, fmt...) dev_dbg(&(u)->pdev->dev, fmt)
#define EPDBG(ep, fmt, ...) do { \
dev_dbg(&(ep)->vhub->pdev->dev, \
"%s:EP%d " fmt, \
(ep)->dev ? (ep)->dev->name : "hub", \
(ep)->d_idx, ##__VA_ARGS__); \
} while(0)
#define DDBG(d, fmt, ...) do { \
dev_dbg(&(d)->vhub->pdev->dev, \
"%s " fmt, (d)->name, \
##__VA_ARGS__); \
} while(0)
#else
#define UDCDBG(u, fmt...) do { } while(0)
#define EPDBG(ep, fmt, ...) do { } while(0)
#define DDBG(d, fmt, ...) do { } while(0)
#endif
/* core.c */
void ast_vhub_done(struct ast_vhub_ep *ep, struct ast_vhub_req *req,
int status);
void ast_vhub_nuke(struct ast_vhub_ep *ep, int status);
struct usb_request *ast_vhub_alloc_request(struct usb_ep *u_ep,
gfp_t gfp_flags);
void ast_vhub_free_request(struct usb_ep *u_ep, struct usb_request *u_req);
void ast_vhub_init_hw(struct ast_vhub *vhub);
/* ep0.c */
void ast_vhub_ep0_handle_ack(struct ast_vhub_ep *ep, bool in_ack);
void ast_vhub_ep0_handle_setup(struct ast_vhub_ep *ep);
void ast_vhub_init_ep0(struct ast_vhub *vhub, struct ast_vhub_ep *ep,
struct ast_vhub_dev *dev);
int ast_vhub_reply(struct ast_vhub_ep *ep, char *ptr, int len);
int __ast_vhub_simple_reply(struct ast_vhub_ep *ep, int len, ...);
#define ast_vhub_simple_reply(udc, ...) \
__ast_vhub_simple_reply((udc), \
sizeof((u8[]) { __VA_ARGS__ })/sizeof(u8), \
__VA_ARGS__)
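/*
 * The length argument is computed at compile time from the number of
 * variadic arguments via a compound literal: for instance
 * ast_vhub_simple_reply(ep, st0, 0) expands to
 * __ast_vhub_simple_reply(ep, 2, st0, 0).
 */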
/* hub.c */
void ast_vhub_init_hub(struct ast_vhub *vhub);
enum std_req_rc ast_vhub_std_hub_request(struct ast_vhub_ep *ep,
struct usb_ctrlrequest *crq);
enum std_req_rc ast_vhub_class_hub_request(struct ast_vhub_ep *ep,
struct usb_ctrlrequest *crq);
void ast_vhub_device_connect(struct ast_vhub *vhub, unsigned int port,
bool on);
void ast_vhub_hub_suspend(struct ast_vhub *vhub);
void ast_vhub_hub_resume(struct ast_vhub *vhub);
void ast_vhub_hub_reset(struct ast_vhub *vhub);
void ast_vhub_hub_wake_all(struct ast_vhub *vhub);
/* dev.c */
int ast_vhub_init_dev(struct ast_vhub *vhub, unsigned int idx);
void ast_vhub_del_dev(struct ast_vhub_dev *d);
void ast_vhub_dev_irq(struct ast_vhub_dev *d);
int ast_vhub_std_dev_request(struct ast_vhub_ep *ep,
struct usb_ctrlrequest *crq);
/* epn.c */
void ast_vhub_epn_ack_irq(struct ast_vhub_ep *ep);
void ast_vhub_update_epn_stall(struct ast_vhub_ep *ep);
struct ast_vhub_ep *ast_vhub_alloc_epn(struct ast_vhub_dev *d, u8 addr);
void ast_vhub_dev_suspend(struct ast_vhub_dev *d);
void ast_vhub_dev_resume(struct ast_vhub_dev *d);
void ast_vhub_dev_reset(struct ast_vhub_dev *d);
#endif /* __ASPEED_VHUB_H */

View File

@@ -20,7 +20,6 @@
#include <linux/ctype.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/atmel_usba_udc.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/irq.h>
@@ -207,94 +206,45 @@ static void usba_ep_init_debugfs(struct usba_udc *udc,
struct dentry *ep_root;
ep_root = debugfs_create_dir(ep->ep.name, udc->debugfs_root);
if (!ep_root)
goto err_root;
ep->debugfs_dir = ep_root;
ep->debugfs_queue = debugfs_create_file("queue", 0400, ep_root,
ep, &queue_dbg_fops);
if (!ep->debugfs_queue)
goto err_queue;
if (ep->can_dma) {
ep->debugfs_dma_status
= debugfs_create_u32("dma_status", 0400, ep_root,
&ep->last_dma_status);
if (!ep->debugfs_dma_status)
goto err_dma_status;
}
if (ep_is_control(ep)) {
ep->debugfs_state
= debugfs_create_u32("state", 0400, ep_root,
&ep->state);
if (!ep->debugfs_state)
goto err_state;
}
return;
err_state:
debugfs_create_file("queue", 0400, ep_root, ep, &queue_dbg_fops);
if (ep->can_dma)
debugfs_remove(ep->debugfs_dma_status);
err_dma_status:
debugfs_remove(ep->debugfs_queue);
err_queue:
debugfs_remove(ep_root);
err_root:
dev_err(&ep->udc->pdev->dev,
"failed to create debugfs directory for %s\n", ep->ep.name);
debugfs_create_u32("dma_status", 0400, ep_root,
&ep->last_dma_status);
if (ep_is_control(ep))
debugfs_create_u32("state", 0400, ep_root, &ep->state);
}
static void usba_ep_cleanup_debugfs(struct usba_ep *ep)
{
debugfs_remove(ep->debugfs_queue);
debugfs_remove(ep->debugfs_dma_status);
debugfs_remove(ep->debugfs_state);
debugfs_remove(ep->debugfs_dir);
ep->debugfs_dma_status = NULL;
ep->debugfs_dir = NULL;
debugfs_remove_recursive(ep->debugfs_dir);
}
static void usba_init_debugfs(struct usba_udc *udc)
{
struct dentry *root, *regs;
struct dentry *root;
struct resource *regs_resource;
root = debugfs_create_dir(udc->gadget.name, NULL);
if (IS_ERR(root) || !root)
goto err_root;
udc->debugfs_root = root;
regs_resource = platform_get_resource(udc->pdev, IORESOURCE_MEM,
CTRL_IOMEM_ID);
if (regs_resource) {
regs = debugfs_create_file_size("regs", 0400, root, udc,
&regs_dbg_fops,
resource_size(regs_resource));
if (!regs)
goto err_regs;
udc->debugfs_regs = regs;
debugfs_create_file_size("regs", 0400, root, udc,
&regs_dbg_fops,
resource_size(regs_resource));
}
usba_ep_init_debugfs(udc, to_usba_ep(udc->gadget.ep0));
return;
err_regs:
debugfs_remove(root);
err_root:
udc->debugfs_root = NULL;
dev_err(&udc->pdev->dev, "debugfs is not available\n");
}
static void usba_cleanup_debugfs(struct usba_udc *udc)
{
usba_ep_cleanup_debugfs(to_usba_ep(udc->gadget.ep0));
debugfs_remove(udc->debugfs_regs);
debugfs_remove(udc->debugfs_root);
udc->debugfs_regs = NULL;
udc->debugfs_root = NULL;
debugfs_remove_recursive(udc->debugfs_root);
}
#else
static inline void usba_ep_init_debugfs(struct usba_udc *udc,
@@ -417,7 +367,7 @@ static inline void usba_int_enb_set(struct usba_udc *udc, u32 val)
static int vbus_is_present(struct usba_udc *udc)
{
if (udc->vbus_pin)
return gpiod_get_value(udc->vbus_pin) ^ udc->vbus_pin_inverted;
return gpiod_get_value(udc->vbus_pin);
/* No Vbus detection: Assume always present */
return 1;
@@ -2076,7 +2026,6 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
udc->vbus_pin = devm_gpiod_get_optional(&pdev->dev, "atmel,vbus",
GPIOD_IN);
udc->vbus_pin_inverted = gpiod_is_active_low(udc->vbus_pin);
if (fifo_mode == 0) {
pp = NULL;
@@ -2279,15 +2228,15 @@ static int usba_udc_probe(struct platform_device *pdev)
if (udc->vbus_pin) {
irq_set_status_flags(gpiod_to_irq(udc->vbus_pin), IRQ_NOAUTOEN);
ret = devm_request_threaded_irq(&pdev->dev,
gpiod_to_irq(udc->vbus_pin), NULL,
usba_vbus_irq_thread, USBA_VBUS_IRQFLAGS,
"atmel_usba_udc", udc);
if (ret) {
udc->vbus_pin = NULL;
dev_warn(&udc->pdev->dev,
"failed to request vbus irq; "
"assuming always on\n");
}
gpiod_to_irq(udc->vbus_pin), NULL,
usba_vbus_irq_thread, USBA_VBUS_IRQFLAGS,
"atmel_usba_udc", udc);
if (ret) {
udc->vbus_pin = NULL;
dev_warn(&udc->pdev->dev,
"failed to request vbus irq; "
"assuming always on\n");
}
}
ret = usb_add_gadget_udc(&pdev->dev, &udc->gadget);

View File

@@ -287,9 +287,6 @@ struct usba_ep {
#ifdef CONFIG_USB_GADGET_DEBUG_FS
u32 last_dma_status;
struct dentry *debugfs_dir;
struct dentry *debugfs_queue;
struct dentry *debugfs_dma_status;
struct dentry *debugfs_state;
#endif
};
@@ -326,7 +323,6 @@ struct usba_udc {
const struct usba_udc_errata *errata;
int irq;
struct gpio_desc *vbus_pin;
int vbus_pin_inverted;
int num_ep;
int configured_ep;
struct usba_fifo_cfg *fifo_cfg;
@@ -345,7 +341,6 @@ struct usba_udc {
#ifdef CONFIG_USB_GADGET_DEBUG_FS
struct dentry *debugfs_root;
struct dentry *debugfs_regs;
#endif
struct regmap *pmc;

View File

@@ -288,8 +288,6 @@ struct bcm63xx_req {
* @ep0_reply: Pending reply from gadget driver.
* @ep0_request: Outstanding ep0 request.
* @debugfs_root: debugfs directory: /sys/kernel/debug/<DRV_MODULE_NAME>.
* @debugfs_usbd: debugfs file "usbd" for controller state.
* @debugfs_iudma: debugfs file "usbd" for IUDMA state.
*/
struct bcm63xx_udc {
spinlock_t lock;
@@ -330,8 +328,6 @@ struct bcm63xx_udc {
struct usb_request *ep0_request;
struct dentry *debugfs_root;
struct dentry *debugfs_usbd;
struct dentry *debugfs_iudma;
};
static const struct usb_ep_ops bcm63xx_udc_ep_ops;
@@ -2247,34 +2243,16 @@ DEFINE_SHOW_ATTRIBUTE(bcm63xx_iudma_dbg);
*/
static void bcm63xx_udc_init_debugfs(struct bcm63xx_udc *udc)
{
struct dentry *root, *usbd, *iudma;
struct dentry *root;
if (!IS_ENABLED(CONFIG_USB_GADGET_DEBUG_FS))
return;
root = debugfs_create_dir(udc->gadget.name, NULL);
if (IS_ERR(root) || !root)
goto err_root;
usbd = debugfs_create_file("usbd", 0400, root, udc,
&bcm63xx_usbd_dbg_fops);
if (!usbd)
goto err_usbd;
iudma = debugfs_create_file("iudma", 0400, root, udc,
&bcm63xx_iudma_dbg_fops);
if (!iudma)
goto err_iudma;
udc->debugfs_root = root;
udc->debugfs_usbd = usbd;
udc->debugfs_iudma = iudma;
return;
err_iudma:
debugfs_remove(usbd);
err_usbd:
debugfs_remove(root);
err_root:
dev_err(udc->dev, "debugfs is not available\n");
debugfs_create_file("usbd", 0400, root, udc, &bcm63xx_usbd_dbg_fops);
debugfs_create_file("iudma", 0400, root, udc, &bcm63xx_iudma_dbg_fops);
}
/**
@@ -2285,12 +2263,7 @@ err_root:
*/
static void bcm63xx_udc_cleanup_debugfs(struct bcm63xx_udc *udc)
{
debugfs_remove(udc->debugfs_iudma);
debugfs_remove(udc->debugfs_usbd);
debugfs_remove(udc->debugfs_root);
udc->debugfs_iudma = NULL;
udc->debugfs_usbd = NULL;
udc->debugfs_root = NULL;
debugfs_remove_recursive(udc->debugfs_root);
}
/***********************************************************************

View File

@@ -244,6 +244,12 @@ EXPORT_SYMBOL_GPL(usb_ep_free_request);
* Returns zero, or a negative error code. Endpoints that are not enabled
* report errors; errors will also be
* reported when the usb peripheral is disconnected.
*
* If and only if @req is successfully queued (the return value is zero),
* @req->complete() will be called exactly once, when the Gadget core and
* UDC are finished with the request. When the completion function is called,
* control of the request is returned to the device driver which submitted it.
* The completion handler may then immediately free or reuse @req.
*/
int usb_ep_queue(struct usb_ep *ep,
struct usb_request *req, gfp_t gfp_flags)

View File

@@ -253,6 +253,7 @@ static int dr_controller_setup(struct fsl_udc *udc)
portctrl |= PORTSCX_PTW_16BIT;
/* fall through */
case FSL_USB2_PHY_UTMI:
case FSL_USB2_PHY_UTMI_DUAL:
if (udc->pdata->have_sysif_regs) {
if (udc->pdata->controller_ver) {
/* controller version 1.6 or above */

View File

@@ -209,15 +209,12 @@ static void gr_dfs_create(struct gr_udc *dev)
const char *name = "gr_udc_state";
dev->dfs_root = debugfs_create_dir(dev_name(dev->dev), NULL);
dev->dfs_state = debugfs_create_file(name, 0444, dev->dfs_root, dev,
&gr_dfs_fops);
debugfs_create_file(name, 0444, dev->dfs_root, dev, &gr_dfs_fops);
}
static void gr_dfs_delete(struct gr_udc *dev)
{
/* Handles NULL and ERR pointers internally */
debugfs_remove(dev->dfs_state);
debugfs_remove(dev->dfs_root);
debugfs_remove_recursive(dev->dfs_root);
}
#else /* !CONFIG_USB_GADGET_DEBUG_FS */

View File

@@ -217,7 +217,6 @@ struct gr_udc {
spinlock_t lock; /* General lock, a.k.a. "dev->lock" in comments */
struct dentry *dfs_root;
struct dentry *dfs_state;
};
#define to_gr_udc(gadget) (container_of((gadget), struct gr_udc, gadget))

View File

@@ -205,50 +205,19 @@ DEFINE_SHOW_ATTRIBUTE(eps_dbg);
static void pxa_init_debugfs(struct pxa_udc *udc)
{
struct dentry *root, *state, *queues, *eps;
struct dentry *root;
root = debugfs_create_dir(udc->gadget.name, NULL);
if (IS_ERR(root) || !root)
goto err_root;
state = debugfs_create_file("udcstate", 0400, root, udc,
&state_dbg_fops);
if (!state)
goto err_state;
queues = debugfs_create_file("queues", 0400, root, udc,
&queues_dbg_fops);
if (!queues)
goto err_queues;
eps = debugfs_create_file("epstate", 0400, root, udc,
&eps_dbg_fops);
if (!eps)
goto err_eps;
udc->debugfs_root = root;
udc->debugfs_state = state;
udc->debugfs_queues = queues;
udc->debugfs_eps = eps;
return;
err_eps:
debugfs_remove(eps);
err_queues:
debugfs_remove(queues);
err_state:
debugfs_remove(root);
err_root:
dev_err(udc->dev, "debugfs is not available\n");
debugfs_create_file("udcstate", 0400, root, udc, &state_dbg_fops);
debugfs_create_file("queues", 0400, root, udc, &queues_dbg_fops);
debugfs_create_file("epstate", 0400, root, udc, &eps_dbg_fops);
}
static void pxa_cleanup_debugfs(struct pxa_udc *udc)
{
debugfs_remove(udc->debugfs_eps);
debugfs_remove(udc->debugfs_queues);
debugfs_remove(udc->debugfs_state);
debugfs_remove(udc->debugfs_root);
udc->debugfs_eps = NULL;
udc->debugfs_queues = NULL;
udc->debugfs_state = NULL;
udc->debugfs_root = NULL;
debugfs_remove_recursive(udc->debugfs_root);
}
#else

View File

@@ -476,9 +476,6 @@ struct pxa_udc {
#endif
#ifdef CONFIG_USB_GADGET_DEBUG_FS
struct dentry *debugfs_root;
struct dentry *debugfs_state;
struct dentry *debugfs_queues;
struct dentry *debugfs_eps;
#endif
};
#define to_pxa(g) (container_of((g), struct pxa_udc, gadget))

View File

@@ -333,6 +333,7 @@ struct renesas_usb3 {
struct extcon_dev *extcon;
struct work_struct extcon_work;
struct phy *phy;
struct dentry *dentry;
struct renesas_usb3_ep *usb3_ep;
int num_usb3_eps;
@@ -622,6 +623,13 @@ static void usb3_disconnect(struct renesas_usb3 *usb3)
usb3_usb2_pullup(usb3, 0);
usb3_clear_bit(usb3, USB30_CON_B3_CONNECT, USB3_USB30_CON);
usb3_reset_epc(usb3);
usb3_disable_irq_1(usb3, USB_INT_1_B2_RSUM | USB_INT_1_B3_PLLWKUP |
USB_INT_1_B3_LUPSUCS | USB_INT_1_B3_DISABLE |
USB_INT_1_SPEED | USB_INT_1_B3_WRMRST |
USB_INT_1_B3_HOTRST | USB_INT_1_B2_SPND |
USB_INT_1_B2_L1SPND | USB_INT_1_B2_USBRST);
usb3_clear_bit(usb3, USB_COM_CON_SPD_MODE, USB3_USB_COM_CON);
usb3_init_epc_registers(usb3);
if (usb3->driver)
usb3->driver->disconnect(&usb3->gadget);
@@ -2383,18 +2391,10 @@ static const struct file_operations renesas_usb3_b_device_fops = {
static void renesas_usb3_debugfs_init(struct renesas_usb3 *usb3,
struct device *dev)
{
struct dentry *root, *file;
usb3->dentry = debugfs_create_dir(dev_name(dev), NULL);
root = debugfs_create_dir(dev_name(dev), NULL);
if (IS_ERR_OR_NULL(root)) {
dev_info(dev, "%s: Can't create the root\n", __func__);
return;
}
file = debugfs_create_file("b_device", 0644, root, usb3,
&renesas_usb3_b_device_fops);
if (!file)
dev_info(dev, "%s: Can't create debugfs mode\n", __func__);
debugfs_create_file("b_device", 0644, usb3->dentry, usb3,
&renesas_usb3_b_device_fops);
}
/*------- platform_driver ------------------------------------------------*/
@@ -2402,14 +2402,13 @@ static int renesas_usb3_remove(struct platform_device *pdev)
{
struct renesas_usb3 *usb3 = platform_get_drvdata(pdev);
debugfs_remove_recursive(usb3->dentry);
device_remove_file(&pdev->dev, &dev_attr_role);
usb_del_gadget_udc(&usb3->gadget);
renesas_usb3_dma_free_prd(usb3, &pdev->dev);
__renesas_usb3_ep_free_request(usb3->ep0_req);
if (usb3->phy)
phy_put(usb3->phy);
pm_runtime_disable(&pdev->dev);
return 0;
@@ -2628,6 +2627,17 @@ static int renesas_usb3_probe(struct platform_device *pdev)
if (ret < 0)
goto err_alloc_prd;
/*
* This is optional. So, if this driver cannot get a phy,
* this driver will not handle a phy anymore.
*/
usb3->phy = devm_phy_optional_get(&pdev->dev, "usb");
if (IS_ERR(usb3->phy)) {
ret = PTR_ERR(usb3->phy);
goto err_add_udc;
}
pm_runtime_enable(&pdev->dev);
ret = usb_add_gadget_udc(&pdev->dev, &usb3->gadget);
if (ret < 0)
goto err_add_udc;
@@ -2636,20 +2646,11 @@ static int renesas_usb3_probe(struct platform_device *pdev)
if (ret < 0)
goto err_dev_create;
/*
* This is an optional. So, if this driver cannot get a phy,
* this driver will not handle a phy anymore.
*/
usb3->phy = devm_phy_get(&pdev->dev, "usb");
if (IS_ERR(usb3->phy))
usb3->phy = NULL;
usb3->workaround_for_vbus = priv->workaround_for_vbus;
renesas_usb3_debugfs_init(usb3, &pdev->dev);
dev_info(&pdev->dev, "probed%s\n", usb3->phy ? " with phy" : "");
pm_runtime_enable(usb3_to_dev(usb3));
return 0;

View File

@@ -1871,13 +1871,9 @@ static int s3c2410_udc_probe(struct platform_device *pdev)
if (retval)
goto err_add_udc;
if (s3c2410_udc_debugfs_root) {
udc->regs_info = debugfs_create_file("registers", S_IRUGO,
s3c2410_udc_debugfs_root,
udc, &s3c2410_udc_debugfs_fops);
if (!udc->regs_info)
dev_warn(dev, "debugfs file creation failed\n");
}
udc->regs_info = debugfs_create_file("registers", S_IRUGO,
s3c2410_udc_debugfs_root, udc,
&s3c2410_udc_debugfs_fops);
dev_dbg(dev, "probe ok\n");
@@ -1994,11 +1990,6 @@ static int __init udc_init(void)
dprintk(DEBUG_NORMAL, "%s\n", gadget_name);
s3c2410_udc_debugfs_root = debugfs_create_dir(gadget_name, NULL);
if (IS_ERR(s3c2410_udc_debugfs_root)) {
pr_err("%s: debugfs dir creation failed %ld\n",
gadget_name, PTR_ERR(s3c2410_udc_debugfs_root));
s3c2410_udc_debugfs_root = NULL;
}
retval = platform_driver_register(&udc_driver_24x0);
if (retval)
@@ -2014,7 +2005,7 @@ err:
static void __exit udc_exit(void)
{
platform_driver_unregister(&udc_driver_24x0);
debugfs_remove(s3c2410_udc_debugfs_root);
debugfs_remove_recursive(s3c2410_udc_debugfs_root);
}
module_init(udc_init);

View File

@@ -33,7 +33,7 @@
* characters (which are also widely used in C strings).
*/
int
usb_gadget_get_string (struct usb_gadget_strings *table, int id, u8 *buf)
usb_gadget_get_string (const struct usb_gadget_strings *table, int id, u8 *buf)
{
struct usb_string *s;
int len;

View File

@@ -52,6 +52,13 @@ config USB_XHCI_PLATFORM
If unsure, say N.
config USB_XHCI_HISTB
tristate "xHCI support for HiSilicon STB SoCs"
depends on USB_XHCI_PLATFORM && (ARCH_HISI || COMPILE_TEST)
help
Say 'Y' to enable the support for the xHCI host controller
found in HiSilicon STB SoCs.
config USB_XHCI_MTK
tristate "xHCI support for MediaTek SoCs"
select MFD_SYSCON
@@ -234,9 +241,7 @@ config USB_EHCI_TEGRA
tristate "NVIDIA Tegra HCD support"
depends on ARCH_TEGRA
select USB_EHCI_ROOT_HUB_TT
select USB_PHY
select USB_ULPI
select USB_ULPI_VIEWPORT
select USB_TEGRA_PHY
help
This driver enables support for the internal USB Host Controllers
found in NVIDIA Tegra SoCs. The controllers are EHCI compliant.

View File

@@ -74,6 +74,7 @@ obj-$(CONFIG_USB_FHCI_HCD) += fhci.o
obj-$(CONFIG_USB_XHCI_HCD) += xhci-hcd.o
obj-$(CONFIG_USB_XHCI_PCI) += xhci-pci.o
obj-$(CONFIG_USB_XHCI_PLATFORM) += xhci-plat-hcd.o
obj-$(CONFIG_USB_XHCI_HISTB) += xhci-histb.o
obj-$(CONFIG_USB_XHCI_MTK) += xhci-mtk.o
obj-$(CONFIG_USB_XHCI_TEGRA) += xhci-tegra.o
obj-$(CONFIG_USB_SL811_HCD) += sl811-hcd.o

View File

@@ -1028,29 +1028,15 @@ static inline void create_debug_files(struct ehci_hcd *ehci)
struct usb_bus *bus = &ehci_to_hcd(ehci)->self;
ehci->debug_dir = debugfs_create_dir(bus->bus_name, ehci_debug_root);
if (!ehci->debug_dir)
return;
if (!debugfs_create_file("async", S_IRUGO, ehci->debug_dir, bus,
&debug_async_fops))
goto file_error;
if (!debugfs_create_file("bandwidth", S_IRUGO, ehci->debug_dir, bus,
&debug_bandwidth_fops))
goto file_error;
if (!debugfs_create_file("periodic", S_IRUGO, ehci->debug_dir, bus,
&debug_periodic_fops))
goto file_error;
if (!debugfs_create_file("registers", S_IRUGO, ehci->debug_dir, bus,
&debug_registers_fops))
goto file_error;
return;
file_error:
debugfs_remove_recursive(ehci->debug_dir);
debugfs_create_file("async", S_IRUGO, ehci->debug_dir, bus,
&debug_async_fops);
debugfs_create_file("bandwidth", S_IRUGO, ehci->debug_dir, bus,
&debug_bandwidth_fops);
debugfs_create_file("periodic", S_IRUGO, ehci->debug_dir, bus,
&debug_periodic_fops);
debugfs_create_file("registers", S_IRUGO, ehci->debug_dir, bus,
&debug_registers_fops);
}
static inline void remove_debug_files(struct ehci_hcd *ehci)

View File

@@ -1311,10 +1311,6 @@ static int __init ehci_hcd_init(void)
#ifdef CONFIG_DYNAMIC_DEBUG
ehci_debug_root = debugfs_create_dir("ehci", usb_debug_root);
if (!ehci_debug_root) {
retval = -ENOENT;
goto err_debug;
}
#endif
#ifdef PLATFORM_DRIVER
@@ -1361,7 +1357,6 @@ clean0:
#ifdef CONFIG_DYNAMIC_DEBUG
debugfs_remove(ehci_debug_root);
ehci_debug_root = NULL;
err_debug:
#endif
clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
return retval;

View File

@@ -157,10 +157,7 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
struct usb_phy *phy;
/* get the PHY device */
if (dev->of_node)
phy = devm_usb_get_phy_by_phandle(dev, "phys", i);
else
phy = devm_usb_get_phy_dev(dev, i);
phy = devm_usb_get_phy_by_phandle(dev, "phys", i);
if (IS_ERR(phy)) {
/* Don't bail out if PHY is not absolutely necessary */
if (pdata->port_mode[i] != OMAP_EHCI_PORT_MODE_PHY)

View File

@@ -36,7 +36,6 @@
#define DRV_NAME "tegra-ehci"
static struct hc_driver __read_mostly tegra_ehci_hc_driver;
static bool usb1_reset_attempted;
struct tegra_ehci_soc_config {
bool has_hostpc;
@@ -51,68 +50,55 @@ struct tegra_ehci_hcd {
enum tegra_usb_phy_port_speed port_speed;
};
/*
* The 1st USB controller contains some UTMI pad registers that are global for
* all the controllers on the chip. Those registers are also cleared when
* reset is asserted to the 1st controller. This means that the 1st controller
* can only be reset when no other controlled has finished probing. So we'll
* reset the 1st controller before doing any other setup on any of the
* controllers, and then never again.
*
* Since this is a PHY issue, the Tegra PHY driver should probably be doing
* the resetting of the USB controllers. But to keep compatibility with old
* device trees that don't have reset phandles in the PHYs, do it here.
* Those old DTs will be vulnerable to total USB breakage if the 1st EHCI
* device isn't the first one to finish probing, so warn them.
*/
static int tegra_reset_usb_controller(struct platform_device *pdev)
{
struct device_node *phy_np;
struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct tegra_ehci_hcd *tegra =
(struct tegra_ehci_hcd *)hcd_to_ehci(hcd)->priv;
bool has_utmi_pad_registers = false;
struct reset_control *rst;
int err;
phy_np = of_parse_phandle(pdev->dev.of_node, "nvidia,phy", 0);
if (!phy_np)
return -ENOENT;
if (of_property_read_bool(phy_np, "nvidia,has-utmi-pad-registers"))
has_utmi_pad_registers = true;
if (!usb1_reset_attempted) {
struct reset_control *usb1_reset;
if (!has_utmi_pad_registers)
usb1_reset = of_reset_control_get(phy_np, "utmi-pads");
else
usb1_reset = tegra->rst;
if (IS_ERR(usb1_reset)) {
dev_warn(&pdev->dev,
"can't get utmi-pads reset from the PHY\n");
dev_warn(&pdev->dev,
"continuing, but please update your DT\n");
} else {
reset_control_assert(usb1_reset);
udelay(1);
reset_control_deassert(usb1_reset);
if (!has_utmi_pad_registers)
reset_control_put(usb1_reset);
}
usb1_reset_attempted = true;
}
if (!has_utmi_pad_registers) {
reset_control_assert(tegra->rst);
udelay(1);
reset_control_deassert(tegra->rst);
/*
* The 1st USB controller contains some UTMI pad registers that are
* global for all the controllers on the chip. Those registers are
* also cleared when reset is asserted to the 1st controller.
*/
rst = of_reset_control_get_shared(phy_np, "utmi-pads");
if (IS_ERR(rst)) {
dev_warn(&pdev->dev,
"can't get utmi-pads reset from the PHY\n");
dev_warn(&pdev->dev,
"continuing, but please update your DT\n");
} else {
/*
* PHY driver performs UTMI-pads reset in a case of
* non-legacy DT.
*/
reset_control_put(rst);
}
of_node_put(phy_np);
/* reset control is shared, hence initialize it first */
err = reset_control_deassert(tegra->rst);
if (err)
return err;
err = reset_control_assert(tegra->rst);
if (err)
return err;
udelay(1);
err = reset_control_deassert(tegra->rst);
if (err)
return err;
return 0;
}
@@ -440,7 +426,7 @@ static int tegra_ehci_probe(struct platform_device *pdev)
goto cleanup_hcd_create;
}
tegra->rst = devm_reset_control_get(&pdev->dev, "usb");
tegra->rst = devm_reset_control_get_shared(&pdev->dev, "usb");
if (IS_ERR(tegra->rst)) {
dev_err(&pdev->dev, "Can't get ehci reset\n");
err = PTR_ERR(tegra->rst);
@@ -452,8 +438,10 @@ static int tegra_ehci_probe(struct platform_device *pdev)
goto cleanup_hcd_create;
err = tegra_reset_usb_controller(pdev);
if (err)
if (err) {
dev_err(&pdev->dev, "Failed to reset controller\n");
goto cleanup_clk_en;
}
u_phy = devm_usb_get_phy_by_phandle(&pdev->dev, "nvidia,phy", 0);
if (IS_ERR(u_phy)) {
@@ -538,6 +526,9 @@ static int tegra_ehci_remove(struct platform_device *pdev)
usb_phy_shutdown(hcd->usb_phy);
usb_remove_hcd(hcd);
reset_control_assert(tegra->rst);
udelay(1);
clk_disable_unprepare(tegra->clk);
usb_put_hcd(hcd);

View File

@@ -83,27 +83,14 @@ void fhci_dfs_create(struct fhci_hcd *fhci)
struct device *dev = fhci_to_hcd(fhci)->self.controller;
fhci->dfs_root = debugfs_create_dir(dev_name(dev), usb_debug_root);
if (!fhci->dfs_root) {
WARN_ON(1);
return;
}
fhci->dfs_regs = debugfs_create_file("regs", S_IFREG | S_IRUGO,
fhci->dfs_root, fhci, &fhci_dfs_regs_fops);
fhci->dfs_irq_stat = debugfs_create_file("irq_stat",
S_IFREG | S_IRUGO, fhci->dfs_root, fhci,
&fhci_dfs_irq_stat_fops);
WARN_ON(!fhci->dfs_regs || !fhci->dfs_irq_stat);
debugfs_create_file("regs", S_IFREG | S_IRUGO, fhci->dfs_root, fhci,
&fhci_dfs_regs_fops);
debugfs_create_file("irq_stat", S_IFREG | S_IRUGO, fhci->dfs_root, fhci,
&fhci_dfs_irq_stat_fops);
}
void fhci_dfs_destroy(struct fhci_hcd *fhci)
{
if (!fhci->dfs_root)
return;
debugfs_remove(fhci->dfs_irq_stat);
debugfs_remove(fhci->dfs_regs);
debugfs_remove(fhci->dfs_root);
debugfs_remove_recursive(fhci->dfs_root);
}

View File

@@ -262,8 +262,6 @@ struct fhci_hcd {
#ifdef CONFIG_FHCI_DEBUG
int usb_irq_stat[13];
struct dentry *dfs_root;
struct dentry *dfs_regs;
struct dentry *dfs_irq_stat;
#endif
};

View File

@@ -844,28 +844,16 @@ static int debug_registers_open(struct inode *inode, struct file *file)
static inline void create_debug_files(struct fotg210_hcd *fotg210)
{
struct usb_bus *bus = &fotg210_to_hcd(fotg210)->self;
struct dentry *root;
fotg210->debug_dir = debugfs_create_dir(bus->bus_name,
fotg210_debug_root);
if (!fotg210->debug_dir)
return;
root = debugfs_create_dir(bus->bus_name, fotg210_debug_root);
fotg210->debug_dir = root;
if (!debugfs_create_file("async", S_IRUGO, fotg210->debug_dir, bus,
&debug_async_fops))
goto file_error;
if (!debugfs_create_file("periodic", S_IRUGO, fotg210->debug_dir, bus,
&debug_periodic_fops))
goto file_error;
if (!debugfs_create_file("registers", S_IRUGO, fotg210->debug_dir, bus,
&debug_registers_fops))
goto file_error;
return;
file_error:
debugfs_remove_recursive(fotg210->debug_dir);
debugfs_create_file("async", S_IRUGO, root, bus, &debug_async_fops);
debugfs_create_file("periodic", S_IRUGO, root, bus,
&debug_periodic_fops);
debugfs_create_file("registers", S_IRUGO, root, bus,
&debug_registers_fops);
}
static inline void remove_debug_files(struct fotg210_hcd *fotg210)
@@ -5686,10 +5674,6 @@ static int __init fotg210_hcd_init(void)
sizeof(struct fotg210_itd));
fotg210_debug_root = debugfs_create_dir("fotg210", usb_debug_root);
if (!fotg210_debug_root) {
retval = -ENOENT;
goto err_debug;
}
retval = platform_driver_register(&fotg210_hcd_driver);
if (retval < 0)
@@ -5699,7 +5683,7 @@ static int __init fotg210_hcd_init(void)
clean:
debugfs_remove(fotg210_debug_root);
fotg210_debug_root = NULL;
err_debug:
clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
return retval;
}


@@ -417,46 +417,22 @@ DEFINE_SHOW_ATTRIBUTE(debug_isoc);
static void create_debug_files(struct imx21 *imx21)
{
imx21->debug_root = debugfs_create_dir(dev_name(imx21->dev), NULL);
if (!imx21->debug_root)
goto failed_create_rootdir;
struct dentry *root;
if (!debugfs_create_file("status", S_IRUGO,
imx21->debug_root, imx21, &debug_status_fops))
goto failed_create;
root = debugfs_create_dir(dev_name(imx21->dev), NULL);
imx21->debug_root = root;
if (!debugfs_create_file("dmem", S_IRUGO,
imx21->debug_root, imx21, &debug_dmem_fops))
goto failed_create;
if (!debugfs_create_file("etd", S_IRUGO,
imx21->debug_root, imx21, &debug_etd_fops))
goto failed_create;
if (!debugfs_create_file("statistics", S_IRUGO,
imx21->debug_root, imx21, &debug_statistics_fops))
goto failed_create;
if (!debugfs_create_file("isoc", S_IRUGO,
imx21->debug_root, imx21, &debug_isoc_fops))
goto failed_create;
return;
failed_create:
debugfs_remove_recursive(imx21->debug_root);
failed_create_rootdir:
imx21->debug_root = NULL;
debugfs_create_file("status", S_IRUGO, root, imx21, &debug_status_fops);
debugfs_create_file("dmem", S_IRUGO, root, imx21, &debug_dmem_fops);
debugfs_create_file("etd", S_IRUGO, root, imx21, &debug_etd_fops);
debugfs_create_file("statistics", S_IRUGO, root, imx21,
&debug_statistics_fops);
debugfs_create_file("isoc", S_IRUGO, root, imx21, &debug_isoc_fops);
}
static void remove_debug_files(struct imx21 *imx21)
{
if (imx21->debug_root) {
debugfs_remove_recursive(imx21->debug_root);
imx21->debug_root = NULL;
}
debugfs_remove_recursive(imx21->debug_root);
}
#endif


@@ -1198,14 +1198,11 @@ static int isp116x_debug_show(struct seq_file *s, void *unused)
}
DEFINE_SHOW_ATTRIBUTE(isp116x_debug);
static int create_debug_file(struct isp116x *isp116x)
static void create_debug_file(struct isp116x *isp116x)
{
isp116x->dentry = debugfs_create_file(hcd_name,
S_IRUGO, NULL, isp116x,
&isp116x_debug_fops);
if (!isp116x->dentry)
return -ENOMEM;
return 0;
}
static void remove_debug_file(struct isp116x *isp116x)
@@ -1215,8 +1212,8 @@ static void remove_debug_file(struct isp116x *isp116x)
#else
#define create_debug_file(d) 0
#define remove_debug_file(d) do{}while(0)
static inline void create_debug_file(struct isp116x *isp116x) { }
static inline void remove_debug_file(struct isp116x *isp116x) { }
#endif /* CONFIG_DEBUG_FS */
@@ -1643,16 +1640,10 @@ static int isp116x_probe(struct platform_device *pdev)
device_wakeup_enable(hcd->self.controller);
ret = create_debug_file(isp116x);
if (ret) {
ERR("Couldn't create debugfs entry\n");
goto err7;
}
create_debug_file(isp116x);
return 0;
err7:
usb_remove_hcd(hcd);
err6:
usb_put_hcd(hcd);
err5:


@@ -212,7 +212,7 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver,
ohci_at91->sfr_regmap = at91_dt_syscon_sfr();
if (!ohci_at91->sfr_regmap)
dev_warn(dev, "failed to find sfr node\n");
dev_dbg(dev, "failed to find sfr node\n");
board = hcd->self.controller->platform_data;
ohci = hcd_to_ohci(hcd);


@@ -762,50 +762,23 @@ static int debug_registers_open(struct inode *inode, struct file *file)
static inline void create_debug_files (struct ohci_hcd *ohci)
{
struct usb_bus *bus = &ohci_to_hcd(ohci)->self;
struct dentry *root;
ohci->debug_dir = debugfs_create_dir(bus->bus_name, ohci_debug_root);
if (!ohci->debug_dir)
goto dir_error;
root = debugfs_create_dir(bus->bus_name, ohci_debug_root);
ohci->debug_dir = root;
ohci->debug_async = debugfs_create_file("async", S_IRUGO,
ohci->debug_dir, ohci,
&debug_async_fops);
if (!ohci->debug_async)
goto async_error;
ohci->debug_periodic = debugfs_create_file("periodic", S_IRUGO,
ohci->debug_dir, ohci,
&debug_periodic_fops);
if (!ohci->debug_periodic)
goto periodic_error;
ohci->debug_registers = debugfs_create_file("registers", S_IRUGO,
ohci->debug_dir, ohci,
&debug_registers_fops);
if (!ohci->debug_registers)
goto registers_error;
debugfs_create_file("async", S_IRUGO, root, ohci, &debug_async_fops);
debugfs_create_file("periodic", S_IRUGO, root, ohci,
&debug_periodic_fops);
debugfs_create_file("registers", S_IRUGO, root, ohci,
&debug_registers_fops);
ohci_dbg (ohci, "created debug files\n");
return;
registers_error:
debugfs_remove(ohci->debug_periodic);
periodic_error:
debugfs_remove(ohci->debug_async);
async_error:
debugfs_remove(ohci->debug_dir);
dir_error:
ohci->debug_periodic = NULL;
ohci->debug_async = NULL;
ohci->debug_dir = NULL;
}
static inline void remove_debug_files (struct ohci_hcd *ohci)
{
debugfs_remove(ohci->debug_registers);
debugfs_remove(ohci->debug_periodic);
debugfs_remove(ohci->debug_async);
debugfs_remove(ohci->debug_dir);
debugfs_remove_recursive(ohci->debug_dir);
}
/*-------------------------------------------------------------------------*/


@@ -1258,10 +1258,6 @@ static int __init ohci_hcd_mod_init(void)
set_bit(USB_OHCI_LOADED, &usb_hcds_loaded);
ohci_debug_root = debugfs_create_dir("ohci", usb_debug_root);
if (!ohci_debug_root) {
retval = -ENOENT;
goto error_debug;
}
#ifdef PS3_SYSTEM_BUS_DRIVER
retval = ps3_ohci_driver_register(&PS3_SYSTEM_BUS_DRIVER);
@@ -1318,7 +1314,6 @@ static int __init ohci_hcd_mod_init(void)
#endif
debugfs_remove(ohci_debug_root);
ohci_debug_root = NULL;
error_debug:
clear_bit(USB_OHCI_LOADED, &usb_hcds_loaded);
return retval;


@@ -431,9 +431,6 @@ struct ohci_hcd {
struct work_struct nec_work; /* Worker for NEC quirk */
struct dentry *debug_dir;
struct dentry *debug_async;
struct dentry *debug_periodic;
struct dentry *debug_registers;
/* platform-specific data -- must come last */
unsigned long priv[0] __aligned(sizeof(s64));


@@ -1268,23 +1268,3 @@ static void quirk_usb_early_handoff(struct pci_dev *pdev)
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_SERIAL_USB, 8, quirk_usb_early_handoff);
bool usb_xhci_needs_pci_reset(struct pci_dev *pdev)
{
/*
* Our dear uPD72020{1,2} friend only partially resets when
* asked to via the XHCI interface, and may end up doing DMA
* at the wrong addresses, as it keeps the top 32bit of some
* addresses from its previous programming under obscure
* circumstances.
* Give it a good wack at probe time. Unfortunately, this
* needs to happen before we've had a chance to discover any
* quirk, or the system will be in a rather bad state.
*/
if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
(pdev->device == 0x0014 || pdev->device == 0x0015))
return true;
return false;
}
EXPORT_SYMBOL_GPL(usb_xhci_needs_pci_reset);
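(The PCI function reset dropped here is superseded by the new XHCI_ZERO_64B_REGS quirk: the xhci-pci.c hunk further down sets it for the same two Renesas device IDs, 0x0014 and 0x0015.)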


@@ -16,7 +16,6 @@ void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev);
void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev);
void usb_disable_xhci_ports(struct pci_dev *xhci_pdev);
void sb800_prefetch(struct device *dev, int on);
bool usb_xhci_needs_pci_reset(struct pci_dev *pdev);
bool usb_amd_pt_check_port(struct device *device, int port);
#else
struct pci_dev;


@@ -590,14 +590,10 @@ static int uhci_start(struct usb_hcd *hcd)
init_waitqueue_head(&uhci->waitqh);
#ifdef UHCI_DEBUG_OPS
dentry = debugfs_create_file(hcd->self.bus_name,
S_IFREG|S_IRUGO|S_IWUSR, uhci_debugfs_root,
uhci, &uhci_debug_operations);
if (!dentry) {
dev_err(uhci_dev(uhci), "couldn't create uhci debugfs entry\n");
return -ENOMEM;
}
uhci->dentry = dentry;
uhci->dentry = debugfs_create_file(hcd->self.bus_name,
S_IFREG|S_IRUGO|S_IWUSR,
uhci_debugfs_root, uhci,
&uhci_debug_operations);
#endif
uhci->frame = dma_zalloc_coherent(uhci_dev(uhci),
@@ -882,8 +878,6 @@ static int __init uhci_hcd_init(void)
if (!errbuf)
goto errbuf_failed;
uhci_debugfs_root = debugfs_create_dir("uhci", usb_debug_root);
if (!uhci_debugfs_root)
goto debug_failed;
#endif
uhci_up_cachep = kmem_cache_create("uhci_urb_priv",
@@ -918,7 +912,6 @@ up_failed:
#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
debugfs_remove(uhci_debugfs_root);
debug_failed:
kfree(errbuf);
errbuf_failed:


@@ -1,3 +1,4 @@
// SPDX-License-Identifier: GPL-2.0
/**
* xhci-dbgcap.c - xHCI debug capability support
*


@@ -1,4 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
/**
* xhci-dbgcap.h - xHCI debug capability support
*


@@ -1,3 +1,4 @@
// SPDX-License-Identifier: GPL-2.0
/**
* xhci-dbgtty.c - tty glue for xHCI debug capability
*


@@ -8,6 +8,7 @@
*/
#include <linux/slab.h>
#include <linux/uaccess.h>
#include "xhci.h"
#include "xhci-debugfs.h"
@@ -333,6 +334,67 @@ static const struct file_operations xhci_context_fops = {
.release = single_release,
};
static int xhci_portsc_show(struct seq_file *s, void *unused)
{
struct xhci_port *port = s->private;
u32 portsc;
portsc = readl(port->addr);
seq_printf(s, "%s\n", xhci_decode_portsc(portsc));
return 0;
}
static int xhci_port_open(struct inode *inode, struct file *file)
{
return single_open(file, xhci_portsc_show, inode->i_private);
}
static ssize_t xhci_port_write(struct file *file, const char __user *ubuf,
size_t count, loff_t *ppos)
{
struct seq_file *s = file->private_data;
struct xhci_port *port = s->private;
struct xhci_hcd *xhci = hcd_to_xhci(port->rhub->hcd);
char buf[32];
u32 portsc;
unsigned long flags;
if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
return -EFAULT;
if (!strncmp(buf, "compliance", 10)) {
/* If CTC is clear, compliance is enabled by default */
if (!HCC2_CTC(xhci->hcc_params2))
return count;
spin_lock_irqsave(&xhci->lock, flags);
/* compliance mode can only be enabled on ports in RxDetect */
portsc = readl(port->addr);
if ((portsc & PORT_PLS_MASK) != XDEV_RXDETECT) {
spin_unlock_irqrestore(&xhci->lock, flags);
return -EPERM;
}
portsc = xhci_port_state_to_neutral(portsc);
portsc &= ~PORT_PLS_MASK;
portsc |= PORT_LINK_STROBE | XDEV_COMP_MODE;
writel(portsc, port->addr);
spin_unlock_irqrestore(&xhci->lock, flags);
} else {
return -EINVAL;
}
return count;
}
static const struct file_operations port_fops = {
.open = xhci_port_open,
.write = xhci_port_write,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static void xhci_debugfs_create_files(struct xhci_hcd *xhci,
struct xhci_file_map *files,
size_t nentries, void *data,
@@ -449,6 +511,27 @@ void xhci_debugfs_remove_slot(struct xhci_hcd *xhci, int slot_id)
dev->debugfs_private = NULL;
}
static void xhci_debugfs_create_ports(struct xhci_hcd *xhci,
struct dentry *parent)
{
unsigned int num_ports;
char port_name[8];
struct xhci_port *port;
struct dentry *dir;
num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
parent = debugfs_create_dir("ports", parent);
while (num_ports--) {
scnprintf(port_name, sizeof(port_name), "port%02d",
num_ports + 1);
dir = debugfs_create_dir(port_name, parent);
port = &xhci->hw_ports[num_ports];
debugfs_create_file("portsc", 0644, dir, port, &port_fops);
}
}
void xhci_debugfs_init(struct xhci_hcd *xhci)
{
struct device *dev = xhci_to_hcd(xhci)->self.controller;
@@ -497,6 +580,8 @@ void xhci_debugfs_init(struct xhci_hcd *xhci)
xhci->debugfs_root);
xhci->debugfs_slots = debugfs_create_dir("devices", xhci->debugfs_root);
xhci_debugfs_create_ports(xhci, xhci->debugfs_root);
}
void xhci_debugfs_exit(struct xhci_hcd *xhci)
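The ports/portNN/portsc files created above decode PORTSC on read and accept the single keyword "compliance" on write, forcing a SuperSpeed port that is sitting in RxDetect into compliance mode for link electrical testing. A user-space sketch of poking that file; the debugfs mount point and the controller directory name are assumptions and will differ per platform:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        /* Illustrative path: <debugfs>/usb/xhci/<controller>/ports/port01/portsc */
        const char *path =
                "/sys/kernel/debug/usb/xhci/0000:00:14.0/ports/port01/portsc";
        int fd = open(path, O_WRONLY);

        if (fd < 0) {
                perror("open portsc");
                return 1;
        }
        /* Only a port currently in RxDetect can be forced into compliance. */
        if (write(fd, "compliance", strlen("compliance")) < 0)
                perror("write compliance");
        close(fd);
        return 0;
}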


@@ -0,0 +1,410 @@
// SPDX-License-Identifier: GPL-2.0
/*
* xHCI host controller driver for HiSilicon STB SoCs
*
* Copyright (C) 2017-2018 HiSilicon Co., Ltd. http://www.hisilicon.com
*
* Authors: Jianguo Sun <sunjianguo1@huawei.com>
*/
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include "xhci.h"
#define GTXTHRCFG 0xc108
#define GRXTHRCFG 0xc10c
#define REG_GUSB2PHYCFG0 0xc200
#define BIT_UTMI_8_16 BIT(3)
#define BIT_UTMI_ULPI BIT(4)
#define BIT_FREECLK_EXIST BIT(30)
#define REG_GUSB3PIPECTL0 0xc2c0
#define USB3_DEEMPHASIS_MASK GENMASK(2, 1)
#define USB3_DEEMPHASIS0 BIT(1)
#define USB3_TX_MARGIN1 BIT(4)
struct xhci_hcd_histb {
struct device *dev;
struct usb_hcd *hcd;
void __iomem *ctrl;
struct clk *bus_clk;
struct clk *utmi_clk;
struct clk *pipe_clk;
struct clk *suspend_clk;
struct reset_control *soft_reset;
};
static inline struct xhci_hcd_histb *hcd_to_histb(struct usb_hcd *hcd)
{
return dev_get_drvdata(hcd->self.controller);
}
static int xhci_histb_config(struct xhci_hcd_histb *histb)
{
struct device_node *np = histb->dev->of_node;
u32 regval;
if (of_property_match_string(np, "phys-names", "inno") >= 0) {
/* USB2 PHY chose ulpi 8bit interface */
regval = readl(histb->ctrl + REG_GUSB2PHYCFG0);
regval &= ~BIT_UTMI_ULPI;
regval &= ~(BIT_UTMI_8_16);
regval &= ~BIT_FREECLK_EXIST;
writel(regval, histb->ctrl + REG_GUSB2PHYCFG0);
}
if (of_property_match_string(np, "phys-names", "combo") >= 0) {
/*
* write 0x010c0012 to GUSB3PIPECTL0
* GUSB3PIPECTL0[5:3] = 010 : Tx Margin = 900mV ,
* decrease TX voltage
* GUSB3PIPECTL0[2:1] = 01 : Tx Deemphasis = -3.5dB,
* refer to xHCI spec
*/
regval = readl(histb->ctrl + REG_GUSB3PIPECTL0);
regval &= ~USB3_DEEMPHASIS_MASK;
regval |= USB3_DEEMPHASIS0;
regval |= USB3_TX_MARGIN1;
writel(regval, histb->ctrl + REG_GUSB3PIPECTL0);
}
writel(0x23100000, histb->ctrl + GTXTHRCFG);
writel(0x23100000, histb->ctrl + GRXTHRCFG);
return 0;
}
static int xhci_histb_clks_get(struct xhci_hcd_histb *histb)
{
struct device *dev = histb->dev;
histb->bus_clk = devm_clk_get(dev, "bus");
if (IS_ERR(histb->bus_clk)) {
dev_err(dev, "fail to get bus clk\n");
return PTR_ERR(histb->bus_clk);
}
histb->utmi_clk = devm_clk_get(dev, "utmi");
if (IS_ERR(histb->utmi_clk)) {
dev_err(dev, "fail to get utmi clk\n");
return PTR_ERR(histb->utmi_clk);
}
histb->pipe_clk = devm_clk_get(dev, "pipe");
if (IS_ERR(histb->pipe_clk)) {
dev_err(dev, "fail to get pipe clk\n");
return PTR_ERR(histb->pipe_clk);
}
histb->suspend_clk = devm_clk_get(dev, "suspend");
if (IS_ERR(histb->suspend_clk)) {
dev_err(dev, "fail to get suspend clk\n");
return PTR_ERR(histb->suspend_clk);
}
return 0;
}
static int xhci_histb_host_enable(struct xhci_hcd_histb *histb)
{
int ret;
ret = clk_prepare_enable(histb->bus_clk);
if (ret) {
dev_err(histb->dev, "failed to enable bus clk\n");
return ret;
}
ret = clk_prepare_enable(histb->utmi_clk);
if (ret) {
dev_err(histb->dev, "failed to enable utmi clk\n");
goto err_utmi_clk;
}
ret = clk_prepare_enable(histb->pipe_clk);
if (ret) {
dev_err(histb->dev, "failed to enable pipe clk\n");
goto err_pipe_clk;
}
ret = clk_prepare_enable(histb->suspend_clk);
if (ret) {
dev_err(histb->dev, "failed to enable suspend clk\n");
goto err_suspend_clk;
}
reset_control_deassert(histb->soft_reset);
return 0;
err_suspend_clk:
clk_disable_unprepare(histb->pipe_clk);
err_pipe_clk:
clk_disable_unprepare(histb->utmi_clk);
err_utmi_clk:
clk_disable_unprepare(histb->bus_clk);
return ret;
}
static void xhci_histb_host_disable(struct xhci_hcd_histb *histb)
{
reset_control_assert(histb->soft_reset);
clk_disable_unprepare(histb->suspend_clk);
clk_disable_unprepare(histb->pipe_clk);
clk_disable_unprepare(histb->utmi_clk);
clk_disable_unprepare(histb->bus_clk);
}
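The clock handling in this new driver is open-coded for its four clocks (bus, utmi, pipe, suspend). Purely as an illustration of an alternative, and not something the driver above does, the same get-then-enable sequence could be expressed with the clk_bulk helpers:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/kernel.h>

/* Sketch only; a real driver would embed this array in its private data. */
static struct clk_bulk_data histb_example_clks[] = {
        { .id = "bus" },
        { .id = "utmi" },
        { .id = "pipe" },
        { .id = "suspend" },
};

static int histb_example_clks_enable(struct device *dev)
{
        int ret;

        ret = devm_clk_bulk_get(dev, ARRAY_SIZE(histb_example_clks),
                                histb_example_clks);
        if (ret)
                return ret;

        /* Enables all four clocks, unwinding automatically on failure. */
        return clk_bulk_prepare_enable(ARRAY_SIZE(histb_example_clks),
                                       histb_example_clks);
}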
static void xhci_histb_quirks(struct device *dev, struct xhci_hcd *xhci)
{
/*
* As of now platform drivers don't provide MSI support so we ensure
* here that the generic code does not try to make a pci_dev from our
* dev struct in order to setup MSI
*/
xhci->quirks |= XHCI_PLAT;
}
/* called during probe() after chip reset completes */
static int xhci_histb_setup(struct usb_hcd *hcd)
{
struct xhci_hcd_histb *histb = hcd_to_histb(hcd);
int ret;
if (usb_hcd_is_primary_hcd(hcd)) {
ret = xhci_histb_config(histb);
if (ret)
return ret;
}
return xhci_gen_setup(hcd, xhci_histb_quirks);
}
static const struct xhci_driver_overrides xhci_histb_overrides __initconst = {
.reset = xhci_histb_setup,
};
static struct hc_driver __read_mostly xhci_histb_hc_driver;
static int xhci_histb_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct xhci_hcd_histb *histb;
const struct hc_driver *driver;
struct usb_hcd *hcd;
struct xhci_hcd *xhci;
struct resource *res;
int irq;
int ret = -ENODEV;
if (usb_disabled())
return -ENODEV;
driver = &xhci_histb_hc_driver;
histb = devm_kzalloc(dev, sizeof(*histb), GFP_KERNEL);
if (!histb)
return -ENOMEM;
histb->dev = dev;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
histb->ctrl = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(histb->ctrl))
return PTR_ERR(histb->ctrl);
ret = xhci_histb_clks_get(histb);
if (ret)
return ret;
histb->soft_reset = devm_reset_control_get(dev, "soft");
if (IS_ERR(histb->soft_reset)) {
dev_err(dev, "failed to get soft reset\n");
return PTR_ERR(histb->soft_reset);
}
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
device_enable_async_suspend(dev);
/* Initialize dma_mask and coherent_dma_mask to 32-bits */
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (ret)
return ret;
hcd = usb_create_hcd(driver, dev, dev_name(dev));
if (!hcd) {
ret = -ENOMEM;
goto disable_pm;
}
hcd->regs = histb->ctrl;
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
histb->hcd = hcd;
dev_set_drvdata(hcd->self.controller, histb);
ret = xhci_histb_host_enable(histb);
if (ret)
goto put_hcd;
xhci = hcd_to_xhci(hcd);
device_wakeup_enable(hcd->self.controller);
xhci->main_hcd = hcd;
xhci->shared_hcd = usb_create_shared_hcd(driver, dev, dev_name(dev),
hcd);
if (!xhci->shared_hcd) {
ret = -ENOMEM;
goto disable_host;
}
if (device_property_read_bool(dev, "usb2-lpm-disable"))
xhci->quirks |= XHCI_HW_LPM_DISABLE;
if (device_property_read_bool(dev, "usb3-lpm-capable"))
xhci->quirks |= XHCI_LPM_SUPPORT;
/* imod_interval is the interrupt moderation value in nanoseconds. */
xhci->imod_interval = 40000;
device_property_read_u32(dev, "imod-interval-ns",
&xhci->imod_interval);
ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (ret)
goto put_usb3_hcd;
if (HCC_MAX_PSA(xhci->hcc_params) >= 4)
xhci->shared_hcd->can_do_streams = 1;
ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED);
if (ret)
goto dealloc_usb2_hcd;
device_enable_async_suspend(dev);
pm_runtime_put_noidle(dev);
/*
* Prevent runtime pm from being on as default, users should enable
* runtime pm using power/control in sysfs.
*/
pm_runtime_forbid(dev);
return 0;
dealloc_usb2_hcd:
usb_remove_hcd(hcd);
put_usb3_hcd:
usb_put_hcd(xhci->shared_hcd);
disable_host:
xhci_histb_host_disable(histb);
put_hcd:
usb_put_hcd(hcd);
disable_pm:
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
return ret;
}
static int xhci_histb_remove(struct platform_device *dev)
{
struct xhci_hcd_histb *histb = platform_get_drvdata(dev);
struct usb_hcd *hcd = histb->hcd;
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
xhci->xhc_state |= XHCI_STATE_REMOVING;
usb_remove_hcd(xhci->shared_hcd);
device_wakeup_disable(&dev->dev);
usb_remove_hcd(hcd);
usb_put_hcd(xhci->shared_hcd);
xhci_histb_host_disable(histb);
usb_put_hcd(hcd);
pm_runtime_put_sync(&dev->dev);
pm_runtime_disable(&dev->dev);
return 0;
}
static int __maybe_unused xhci_histb_suspend(struct device *dev)
{
struct xhci_hcd_histb *histb = dev_get_drvdata(dev);
struct usb_hcd *hcd = histb->hcd;
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
int ret;
ret = xhci_suspend(xhci, device_may_wakeup(dev));
if (!device_may_wakeup(dev))
xhci_histb_host_disable(histb);
return ret;
}
static int __maybe_unused xhci_histb_resume(struct device *dev)
{
struct xhci_hcd_histb *histb = dev_get_drvdata(dev);
struct usb_hcd *hcd = histb->hcd;
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
if (!device_may_wakeup(dev))
xhci_histb_host_enable(histb);
return xhci_resume(xhci, 0);
}
static const struct dev_pm_ops xhci_histb_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(xhci_histb_suspend, xhci_histb_resume)
};
#define DEV_PM_OPS (IS_ENABLED(CONFIG_PM) ? &xhci_histb_pm_ops : NULL)
#ifdef CONFIG_OF
static const struct of_device_id histb_xhci_of_match[] = {
{ .compatible = "hisilicon,hi3798cv200-xhci"},
{ },
};
MODULE_DEVICE_TABLE(of, histb_xhci_of_match);
#endif
static struct platform_driver histb_xhci_driver = {
.probe = xhci_histb_probe,
.remove = xhci_histb_remove,
.driver = {
.name = "xhci-histb",
.pm = DEV_PM_OPS,
.of_match_table = of_match_ptr(histb_xhci_of_match),
},
};
MODULE_ALIAS("platform:xhci-histb");
static int __init xhci_histb_init(void)
{
xhci_init_driver(&xhci_histb_hc_driver, &xhci_histb_overrides);
return platform_driver_register(&histb_xhci_driver);
}
module_init(xhci_histb_init);
static void __exit xhci_histb_exit(void)
{
platform_driver_unregister(&histb_xhci_driver);
}
module_exit(xhci_histb_exit);
MODULE_DESCRIPTION("HiSilicon STB xHCI Host Controller Driver");
MODULE_LICENSE("GPL v2");


@@ -189,9 +189,10 @@ static void xhci_usb2_hub_descriptor(struct usb_hcd *hcd, struct xhci_hcd *xhci,
__u8 port_removable[(USB_MAXCHILDREN + 1 + 7) / 8];
u32 portsc;
unsigned int i;
struct xhci_hub *rhub;
ports = xhci->num_usb2_ports;
rhub = &xhci->usb2_rhub;
ports = rhub->num_ports;
xhci_common_hub_descriptor(xhci, desc, ports);
desc->bDescriptorType = USB_DT_HUB;
temp = 1 + (ports / 8);
@@ -202,7 +203,7 @@ static void xhci_usb2_hub_descriptor(struct usb_hcd *hcd, struct xhci_hcd *xhci,
*/
memset(port_removable, 0, sizeof(port_removable));
for (i = 0; i < ports; i++) {
portsc = readl(xhci->usb2_ports[i]);
portsc = readl(rhub->ports[i]->addr);
/* If a device is removable, PORTSC reports a 0, same as in the
* hub descriptor DeviceRemovable bits.
*/
@@ -241,8 +242,10 @@ static void xhci_usb3_hub_descriptor(struct usb_hcd *hcd, struct xhci_hcd *xhci,
u16 port_removable;
u32 portsc;
unsigned int i;
struct xhci_hub *rhub;
ports = xhci->num_usb3_ports;
rhub = &xhci->usb3_rhub;
ports = rhub->num_ports;
xhci_common_hub_descriptor(xhci, desc, ports);
desc->bDescriptorType = USB_DT_SS_HUB;
desc->bDescLength = USB_DT_SS_HUB_SIZE;
@@ -256,7 +259,7 @@ static void xhci_usb3_hub_descriptor(struct usb_hcd *hcd, struct xhci_hcd *xhci,
port_removable = 0;
/* bit 0 is reserved, bit 1 is for port 1, etc. */
for (i = 0; i < ports; i++) {
portsc = readl(xhci->usb3_ports[i]);
portsc = readl(rhub->ports[i]->addr);
if (portsc & PORT_DEV_REMOVE)
port_removable |= 1 << (i + 1);
}
@@ -538,28 +541,13 @@ static void xhci_clear_port_change_bit(struct xhci_hcd *xhci, u16 wValue,
port_change_bit, wIndex, port_status);
}
static int xhci_get_ports(struct usb_hcd *hcd, __le32 __iomem ***port_array)
struct xhci_hub *xhci_get_rhub(struct usb_hcd *hcd)
{
int max_ports;
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
if (hcd->speed >= HCD_USB3) {
max_ports = xhci->num_usb3_ports;
*port_array = xhci->usb3_ports;
} else {
max_ports = xhci->num_usb2_ports;
*port_array = xhci->usb2_ports;
}
return max_ports;
}
static __le32 __iomem *xhci_get_port_io_addr(struct usb_hcd *hcd, int index)
{
__le32 __iomem **port_array;
xhci_get_ports(hcd, &port_array);
return port_array[index];
if (hcd->speed >= HCD_USB3)
return &xhci->usb3_rhub;
return &xhci->usb2_rhub;
}
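xhci_get_rhub() is the pivot of this conversion: instead of getting a bare __le32 __iomem * array plus a port count out of xhci_get_ports(), callers now receive a struct xhci_hub whose ports[] holds struct xhci_port pointers, and every register access goes through port->addr. A sketch of the resulting access pattern (an illustrative helper, assuming xhci_get_rhub() and the new structures are visible via xhci.h):

#include "xhci.h"

/* Illustrative only: walk one roothub and dump every PORTSC value. */
static void example_dump_portsc(struct usb_hcd *hcd)
{
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
        struct xhci_hub *rhub = xhci_get_rhub(hcd);
        unsigned int i;

        for (i = 0; i < rhub->num_ports; i++)
                xhci_dbg(xhci, "port %d: portsc 0x%08x\n",
                         rhub->ports[i]->hcd_portnum + 1,
                         readl(rhub->ports[i]->addr));
}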
/*
@@ -570,21 +558,23 @@ static __le32 __iomem *xhci_get_port_io_addr(struct usb_hcd *hcd, int index)
static void xhci_set_port_power(struct xhci_hcd *xhci, struct usb_hcd *hcd,
u16 index, bool on, unsigned long *flags)
{
__le32 __iomem *addr;
struct xhci_hub *rhub;
struct xhci_port *port;
u32 temp;
addr = xhci_get_port_io_addr(hcd, index);
temp = readl(addr);
rhub = xhci_get_rhub(hcd);
port = rhub->ports[index];
temp = readl(port->addr);
temp = xhci_port_state_to_neutral(temp);
if (on) {
/* Power on */
writel(temp | PORT_POWER, addr);
temp = readl(addr);
writel(temp | PORT_POWER, port->addr);
temp = readl(port->addr);
xhci_dbg(xhci, "set port power, actual port %d status = 0x%x\n",
index, temp);
} else {
/* Power off */
writel(temp & ~PORT_POWER, addr);
writel(temp & ~PORT_POWER, port->addr);
}
spin_unlock_irqrestore(&xhci->lock, *flags);
@@ -600,13 +590,13 @@ static void xhci_port_set_test_mode(struct xhci_hcd *xhci,
u16 test_mode, u16 wIndex)
{
u32 temp;
__le32 __iomem *addr;
struct xhci_port *port;
/* xhci only supports test mode for usb2 ports, i.e. xhci->main_hcd */
addr = xhci_get_port_io_addr(xhci->main_hcd, wIndex);
temp = readl(addr + PORTPMSC);
/* xhci only supports test mode for usb2 ports */
port = xhci->usb2_rhub.ports[wIndex];
temp = readl(port->addr + PORTPMSC);
temp |= test_mode << PORT_TEST_MODE_SHIFT;
writel(temp, addr + PORTPMSC);
writel(temp, port->addr + PORTPMSC);
xhci->test_mode = test_mode;
if (test_mode == TEST_FORCE_EN)
xhci_start(xhci);
@@ -633,10 +623,10 @@ static int xhci_enter_test_mode(struct xhci_hcd *xhci,
/* Put all ports to the Disable state by clear PP */
xhci_dbg(xhci, "Disable all port (PP = 0)\n");
/* Power off USB3 ports*/
for (i = 0; i < xhci->num_usb3_ports; i++)
for (i = 0; i < xhci->usb3_rhub.num_ports; i++)
xhci_set_port_power(xhci, xhci->shared_hcd, i, false, flags);
/* Power off USB2 ports*/
for (i = 0; i < xhci->num_usb2_ports; i++)
for (i = 0; i < xhci->usb2_rhub.num_ports; i++)
xhci_set_port_power(xhci, xhci->main_hcd, i, false, flags);
/* Stop the controller */
xhci_dbg(xhci, "Stop controller\n");
@@ -672,24 +662,24 @@ static int xhci_exit_test_mode(struct xhci_hcd *xhci)
return xhci_reset(xhci);
}
void xhci_set_link_state(struct xhci_hcd *xhci, __le32 __iomem **port_array,
int port_id, u32 link_state)
void xhci_set_link_state(struct xhci_hcd *xhci, struct xhci_port *port,
u32 link_state)
{
u32 temp;
temp = readl(port_array[port_id]);
temp = readl(port->addr);
temp = xhci_port_state_to_neutral(temp);
temp &= ~PORT_PLS_MASK;
temp |= PORT_LINK_STROBE | link_state;
writel(temp, port_array[port_id]);
writel(temp, port->addr);
}
static void xhci_set_remote_wake_mask(struct xhci_hcd *xhci,
__le32 __iomem **port_array, int port_id, u16 wake_mask)
struct xhci_port *port, u16 wake_mask)
{
u32 temp;
temp = readl(port_array[port_id]);
temp = readl(port->addr);
temp = xhci_port_state_to_neutral(temp);
if (wake_mask & USB_PORT_FEAT_REMOTE_WAKE_CONNECT)
@@ -707,20 +697,20 @@ static void xhci_set_remote_wake_mask(struct xhci_hcd *xhci,
else
temp &= ~PORT_WKOC_E;
writel(temp, port_array[port_id]);
writel(temp, port->addr);
}
/* Test and clear port RWC bit */
void xhci_test_and_clear_bit(struct xhci_hcd *xhci, __le32 __iomem **port_array,
int port_id, u32 port_bit)
void xhci_test_and_clear_bit(struct xhci_hcd *xhci, struct xhci_port *port,
u32 port_bit)
{
u32 temp;
temp = readl(port_array[port_id]);
temp = readl(port->addr);
if (temp & port_bit) {
temp = xhci_port_state_to_neutral(temp);
temp |= port_bit;
writel(temp, port_array[port_id]);
writel(temp, port->addr);
}
}
@@ -794,7 +784,7 @@ static void xhci_hub_report_usb3_link_state(struct xhci_hcd *xhci,
static void xhci_del_comp_mod_timer(struct xhci_hcd *xhci, u32 status,
u16 wIndex)
{
u32 all_ports_seen_u0 = ((1 << xhci->num_usb3_ports)-1);
u32 all_ports_seen_u0 = ((1 << xhci->usb3_rhub.num_ports) - 1);
bool port_in_u0 = ((status & PORT_PLS_MASK) == XDEV_U0);
if (!(xhci->quirks & XHCI_COMP_MODE_QUIRK))
@@ -840,8 +830,7 @@ static u32 xhci_get_ext_port_status(u32 raw_port_status, u32 port_li)
*/
static u32 xhci_get_port_status(struct usb_hcd *hcd,
struct xhci_bus_state *bus_state,
__le32 __iomem **port_array,
u16 wIndex, u32 raw_port_status,
u16 wIndex, u32 raw_port_status,
unsigned long flags)
__releases(&xhci->lock)
__acquires(&xhci->lock)
@@ -849,6 +838,11 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
u32 status = 0;
int slot_id;
struct xhci_hub *rhub;
struct xhci_port *port;
rhub = xhci_get_rhub(hcd);
port = rhub->ports[wIndex];
/* wPortChange bits */
if (raw_port_status & PORT_CSC)
@@ -919,10 +913,8 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
set_bit(wIndex, &bus_state->rexit_ports);
xhci_test_and_clear_bit(xhci, port_array, wIndex,
PORT_PLC);
xhci_set_link_state(xhci, port_array, wIndex,
XDEV_U0);
xhci_test_and_clear_bit(xhci, port, PORT_PLC);
xhci_set_link_state(xhci, port, XDEV_U0);
spin_unlock_irqrestore(&xhci->lock, flags);
time_left = wait_for_completion_timeout(
@@ -940,7 +932,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
}
xhci_ring_device(xhci, slot_id);
} else {
int port_status = readl(port_array[wIndex]);
int port_status = readl(port->addr);
xhci_warn(xhci, "Port resume took longer than %i msec, port status = 0x%x\n",
XHCI_MAX_REXIT_TIMEOUT,
port_status);
@@ -1024,15 +1016,18 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
unsigned long flags;
u32 temp, status;
int retval = 0;
__le32 __iomem **port_array;
int slot_id;
struct xhci_bus_state *bus_state;
u16 link_state = 0;
u16 wake_mask = 0;
u16 timeout = 0;
u16 test_mode = 0;
struct xhci_hub *rhub;
struct xhci_port **ports;
max_ports = xhci_get_ports(hcd, &port_array);
rhub = xhci_get_rhub(hcd);
ports = rhub->ports;
max_ports = rhub->num_ports;
bus_state = &xhci->bus_state[hcd_index(hcd)];
spin_lock_irqsave(&xhci->lock, flags);
@@ -1070,15 +1065,15 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
if (!wIndex || wIndex > max_ports)
goto error;
wIndex--;
temp = readl(port_array[wIndex]);
temp = readl(ports[wIndex]->addr);
if (temp == ~(u32)0) {
xhci_hc_died(xhci);
retval = -ENODEV;
break;
}
trace_xhci_get_port_status(wIndex, temp);
status = xhci_get_port_status(hcd, bus_state, port_array,
wIndex, temp, flags);
status = xhci_get_port_status(hcd, bus_state, wIndex, temp,
flags);
if (status == 0xffffffff)
goto error;
@@ -1096,7 +1091,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
retval = -EINVAL;
break;
}
port_li = readl(port_array[wIndex] + PORTLI);
port_li = readl(ports[wIndex]->addr + PORTLI);
status = xhci_get_ext_port_status(temp, port_li);
put_unaligned_le32(cpu_to_le32(status), &buf[4]);
}
@@ -1114,7 +1109,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
if (!wIndex || wIndex > max_ports)
goto error;
wIndex--;
temp = readl(port_array[wIndex]);
temp = readl(ports[wIndex]->addr);
if (temp == ~(u32)0) {
xhci_hc_died(xhci);
retval = -ENODEV;
@@ -1124,10 +1119,10 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
/* FIXME: What new port features do we need to support? */
switch (wValue) {
case USB_PORT_FEAT_SUSPEND:
temp = readl(port_array[wIndex]);
temp = readl(ports[wIndex]->addr);
if ((temp & PORT_PLS_MASK) != XDEV_U0) {
/* Resume the port to U0 first */
xhci_set_link_state(xhci, port_array, wIndex,
xhci_set_link_state(xhci, ports[wIndex],
XDEV_U0);
spin_unlock_irqrestore(&xhci->lock, flags);
msleep(10);
@@ -1137,7 +1132,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
* a port unless the port reports that it is in the
* enabled (PED = 1,PLS < 3) state.
*/
temp = readl(port_array[wIndex]);
temp = readl(ports[wIndex]->addr);
if ((temp & PORT_PE) == 0 || (temp & PORT_RESET)
|| (temp & PORT_PLS_MASK) >= XDEV_U3) {
xhci_warn(xhci, "USB core suspending device not in U0/U1/U2.\n");
@@ -1155,18 +1150,17 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
xhci_stop_device(xhci, slot_id, 1);
spin_lock_irqsave(&xhci->lock, flags);
xhci_set_link_state(xhci, port_array, wIndex, XDEV_U3);
xhci_set_link_state(xhci, ports[wIndex], XDEV_U3);
spin_unlock_irqrestore(&xhci->lock, flags);
msleep(10); /* wait device to enter */
spin_lock_irqsave(&xhci->lock, flags);
temp = readl(port_array[wIndex]);
temp = readl(ports[wIndex]->addr);
bus_state->suspended_ports |= 1 << wIndex;
break;
case USB_PORT_FEAT_LINK_STATE:
temp = readl(port_array[wIndex]);
temp = readl(ports[wIndex]->addr);
/* Disable port */
if (link_state == USB_SS_PORT_LS_SS_DISABLED) {
xhci_dbg(xhci, "Disable port %d\n", wIndex);
@@ -1178,17 +1172,17 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
temp |= PORT_CSC | PORT_PEC | PORT_WRC |
PORT_OCC | PORT_RC | PORT_PLC |
PORT_CEC;
writel(temp | PORT_PE, port_array[wIndex]);
temp = readl(port_array[wIndex]);
writel(temp | PORT_PE, ports[wIndex]->addr);
temp = readl(ports[wIndex]->addr);
break;
}
/* Put link in RxDetect (enable port) */
if (link_state == USB_SS_PORT_LS_RX_DETECT) {
xhci_dbg(xhci, "Enable port %d\n", wIndex);
xhci_set_link_state(xhci, port_array, wIndex,
link_state);
temp = readl(port_array[wIndex]);
xhci_set_link_state(xhci, ports[wIndex],
link_state);
temp = readl(ports[wIndex]->addr);
break;
}
@@ -1219,9 +1213,10 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
xhci_dbg(xhci, "Enable compliance mode transition for port %d\n",
wIndex);
xhci_set_link_state(xhci, port_array, wIndex,
xhci_set_link_state(xhci, ports[wIndex],
link_state);
temp = readl(port_array[wIndex]);
temp = readl(ports[wIndex]->addr);
break;
}
/* Port must be enabled */
@@ -1248,14 +1243,13 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
}
}
xhci_set_link_state(xhci, port_array, wIndex,
link_state);
xhci_set_link_state(xhci, ports[wIndex], link_state);
spin_unlock_irqrestore(&xhci->lock, flags);
msleep(20); /* wait device to enter */
spin_lock_irqsave(&xhci->lock, flags);
temp = readl(port_array[wIndex]);
temp = readl(ports[wIndex]->addr);
if (link_state == USB_SS_PORT_LS_U3)
bus_state->suspended_ports |= 1 << wIndex;
break;
@@ -1270,40 +1264,39 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
break;
case USB_PORT_FEAT_RESET:
temp = (temp | PORT_RESET);
writel(temp, port_array[wIndex]);
writel(temp, ports[wIndex]->addr);
temp = readl(port_array[wIndex]);
temp = readl(ports[wIndex]->addr);
xhci_dbg(xhci, "set port reset, actual port %d status = 0x%x\n", wIndex, temp);
break;
case USB_PORT_FEAT_REMOTE_WAKE_MASK:
xhci_set_remote_wake_mask(xhci, port_array,
wIndex, wake_mask);
temp = readl(port_array[wIndex]);
xhci_set_remote_wake_mask(xhci, ports[wIndex],
wake_mask);
temp = readl(ports[wIndex]->addr);
xhci_dbg(xhci, "set port remote wake mask, "
"actual port %d status = 0x%x\n",
wIndex, temp);
break;
case USB_PORT_FEAT_BH_PORT_RESET:
temp |= PORT_WR;
writel(temp, port_array[wIndex]);
temp = readl(port_array[wIndex]);
writel(temp, ports[wIndex]->addr);
temp = readl(ports[wIndex]->addr);
break;
case USB_PORT_FEAT_U1_TIMEOUT:
if (hcd->speed < HCD_USB3)
goto error;
temp = readl(port_array[wIndex] + PORTPMSC);
temp = readl(ports[wIndex]->addr + PORTPMSC);
temp &= ~PORT_U1_TIMEOUT_MASK;
temp |= PORT_U1_TIMEOUT(timeout);
writel(temp, port_array[wIndex] + PORTPMSC);
writel(temp, ports[wIndex]->addr + PORTPMSC);
break;
case USB_PORT_FEAT_U2_TIMEOUT:
if (hcd->speed < HCD_USB3)
goto error;
temp = readl(port_array[wIndex] + PORTPMSC);
temp = readl(ports[wIndex]->addr + PORTPMSC);
temp &= ~PORT_U2_TIMEOUT_MASK;
temp |= PORT_U2_TIMEOUT(timeout);
writel(temp, port_array[wIndex] + PORTPMSC);
writel(temp, ports[wIndex]->addr + PORTPMSC);
break;
case USB_PORT_FEAT_TEST:
/* 4.19.6 Port Test Modes (USB2 Test Mode) */
@@ -1318,13 +1311,13 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
goto error;
}
/* unblock any posted writes */
temp = readl(port_array[wIndex]);
temp = readl(ports[wIndex]->addr);
break;
case ClearPortFeature:
if (!wIndex || wIndex > max_ports)
goto error;
wIndex--;
temp = readl(port_array[wIndex]);
temp = readl(ports[wIndex]->addr);
if (temp == ~(u32)0) {
xhci_hc_died(xhci);
retval = -ENODEV;
@@ -1334,7 +1327,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
temp = xhci_port_state_to_neutral(temp);
switch (wValue) {
case USB_PORT_FEAT_SUSPEND:
temp = readl(port_array[wIndex]);
temp = readl(ports[wIndex]->addr);
xhci_dbg(xhci, "clear USB_PORT_FEAT_SUSPEND\n");
xhci_dbg(xhci, "PORTSC %04x\n", temp);
if (temp & PORT_RESET)
@@ -1344,12 +1337,12 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
goto error;
set_bit(wIndex, &bus_state->resuming_ports);
xhci_set_link_state(xhci, port_array, wIndex,
XDEV_RESUME);
xhci_set_link_state(xhci, ports[wIndex],
XDEV_RESUME);
spin_unlock_irqrestore(&xhci->lock, flags);
msleep(USB_RESUME_TIMEOUT);
spin_lock_irqsave(&xhci->lock, flags);
xhci_set_link_state(xhci, port_array, wIndex,
xhci_set_link_state(xhci, ports[wIndex],
XDEV_U0);
clear_bit(wIndex, &bus_state->resuming_ports);
}
@@ -1374,11 +1367,11 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
case USB_PORT_FEAT_C_PORT_LINK_STATE:
case USB_PORT_FEAT_C_PORT_CONFIG_ERROR:
xhci_clear_port_change_bit(xhci, wValue, wIndex,
port_array[wIndex], temp);
ports[wIndex]->addr, temp);
break;
case USB_PORT_FEAT_ENABLE:
xhci_disable_port(hcd, xhci, wIndex,
port_array[wIndex], temp);
ports[wIndex]->addr, temp);
break;
case USB_PORT_FEAT_POWER:
xhci_set_port_power(xhci, hcd, wIndex, false, &flags);
@@ -1415,11 +1408,14 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
int i, retval;
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
int max_ports;
__le32 __iomem **port_array;
struct xhci_bus_state *bus_state;
bool reset_change = false;
struct xhci_hub *rhub;
struct xhci_port **ports;
max_ports = xhci_get_ports(hcd, &port_array);
rhub = xhci_get_rhub(hcd);
ports = rhub->ports;
max_ports = rhub->num_ports;
bus_state = &xhci->bus_state[hcd_index(hcd)];
/* Initial status is no changes */
@@ -1437,7 +1433,7 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
spin_lock_irqsave(&xhci->lock, flags);
/* For each port, did anything change? If so, set that bit in buf. */
for (i = 0; i < max_ports; i++) {
temp = readl(port_array[i]);
temp = readl(ports[i]->addr);
if (temp == ~(u32)0) {
xhci_hc_died(xhci);
retval = -ENODEV;
@@ -1469,11 +1465,14 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
int max_ports, port_index;
__le32 __iomem **port_array;
struct xhci_bus_state *bus_state;
unsigned long flags;
struct xhci_hub *rhub;
struct xhci_port **ports;
max_ports = xhci_get_ports(hcd, &port_array);
rhub = xhci_get_rhub(hcd);
ports = rhub->ports;
max_ports = rhub->num_ports;
bus_state = &xhci->bus_state[hcd_index(hcd)];
spin_lock_irqsave(&xhci->lock, flags);
@@ -1494,7 +1493,7 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
u32 t1, t2;
int slot_id;
t1 = readl(port_array[port_index]);
t1 = readl(ports[port_index]->addr);
t2 = xhci_port_state_to_neutral(t1);
if ((t1 & PORT_PE) && !(t1 & PORT_PLS_MASK)) {
@@ -1534,7 +1533,7 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
t1 = xhci_port_state_to_neutral(t1);
if (t1 != t2)
writel(t2, port_array[port_index]);
writel(t2, ports[port_index]->addr);
}
hcd->state = HC_STATE_SUSPENDED;
bus_state->next_statechange = jiffies + msecs_to_jiffies(10);
@@ -1547,12 +1546,11 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
* warm reset a USB3 device stuck in polling or compliance mode after resume.
* See Intel 100/c230 series PCH specification update Doc #332692-006 Errata #8
*/
static bool xhci_port_missing_cas_quirk(int port_index,
__le32 __iomem **port_array)
static bool xhci_port_missing_cas_quirk(struct xhci_port *port)
{
u32 portsc;
portsc = readl(port_array[port_index]);
portsc = readl(port->addr);
/* if any of these are set we are not stuck */
if (portsc & (PORT_CONNECT | PORT_CAS))
@@ -1565,9 +1563,9 @@ static bool xhci_port_missing_cas_quirk(int port_index,
/* clear wakeup/change bits, and do a warm port reset */
portsc &= ~(PORT_RWC_BITS | PORT_CEC | PORT_WAKE_BITS);
portsc |= PORT_WR;
writel(portsc, port_array[port_index]);
writel(portsc, port->addr);
/* flush write */
readl(port_array[port_index]);
readl(port->addr);
return true;
}
@@ -1575,15 +1573,18 @@ int xhci_bus_resume(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct xhci_bus_state *bus_state;
__le32 __iomem **port_array;
unsigned long flags;
int max_ports, port_index;
int slot_id;
int sret;
u32 next_state;
u32 temp, portsc;
struct xhci_hub *rhub;
struct xhci_port **ports;
max_ports = xhci_get_ports(hcd, &port_array);
rhub = xhci_get_rhub(hcd);
ports = rhub->ports;
max_ports = rhub->num_ports;
bus_state = &xhci->bus_state[hcd_index(hcd)];
if (time_before(jiffies, bus_state->next_statechange))
@@ -1608,12 +1609,12 @@ int xhci_bus_resume(struct usb_hcd *hcd)
port_index = max_ports;
while (port_index--) {
portsc = readl(port_array[port_index]);
portsc = readl(ports[port_index]->addr);
/* warm reset CAS limited ports stuck in polling/compliance */
if ((xhci->quirks & XHCI_MISSING_CAS) &&
(hcd->speed >= HCD_USB3) &&
xhci_port_missing_cas_quirk(port_index, port_array)) {
xhci_port_missing_cas_quirk(ports[port_index])) {
xhci_dbg(xhci, "reset stuck port %d\n", port_index);
clear_bit(port_index, &bus_state->bus_suspended);
continue;
@@ -1637,7 +1638,7 @@ int xhci_bus_resume(struct usb_hcd *hcd)
}
/* disable wake for all ports, write new link state if needed */
portsc &= ~(PORT_RWC_BITS | PORT_CEC | PORT_WAKE_BITS);
writel(portsc, port_array[port_index]);
writel(portsc, ports[port_index]->addr);
}
/* USB2 specific resume signaling delay and U0 link state transition */
@@ -1650,23 +1651,22 @@ int xhci_bus_resume(struct usb_hcd *hcd)
for_each_set_bit(port_index, &bus_state->bus_suspended,
BITS_PER_LONG) {
/* Clear PLC to poll it later for U0 transition */
xhci_test_and_clear_bit(xhci, port_array, port_index,
xhci_test_and_clear_bit(xhci, ports[port_index],
PORT_PLC);
xhci_set_link_state(xhci, port_array, port_index,
XDEV_U0);
xhci_set_link_state(xhci, ports[port_index], XDEV_U0);
}
}
/* poll for U0 link state complete, both USB2 and USB3 */
for_each_set_bit(port_index, &bus_state->bus_suspended, BITS_PER_LONG) {
sret = xhci_handshake(port_array[port_index], PORT_PLC,
sret = xhci_handshake(ports[port_index]->addr, PORT_PLC,
PORT_PLC, 10 * 1000);
if (sret) {
xhci_warn(xhci, "port %d resume PLC timeout\n",
port_index);
continue;
}
xhci_test_and_clear_bit(xhci, port_array, port_index, PORT_PLC);
xhci_test_and_clear_bit(xhci, ports[port_index], PORT_PLC);
slot_id = xhci_find_slot_id_by_port(hcd, xhci, port_index + 1);
if (slot_id)
xhci_ring_device(xhci, slot_id);


@@ -33,8 +33,9 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
struct xhci_segment *seg;
dma_addr_t dma;
int i;
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
seg = kzalloc(sizeof *seg, flags);
seg = kzalloc_node(sizeof(*seg), flags, dev_to_node(dev));
if (!seg)
return NULL;
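The allocation hunks in this file all make the same change: xHCI bookkeeping is now allocated on the NUMA node of the controller itself, taken via dev_to_node() from the sysdev behind the HCD. The idiom, reduced to its core (the helper name is made up):

#include <linux/device.h>
#include <linux/slab.h>

/*
 * Allocate near the device that will use the buffer.  On non-NUMA
 * systems dev_to_node() returns NUMA_NO_NODE and this degrades to a
 * plain kzalloc().
 */
static void *alloc_near_device(struct device *dev, size_t size, gfp_t flags)
{
        return kzalloc_node(size, flags, dev_to_node(dev));
}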
@@ -45,7 +46,8 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
}
if (max_packet) {
seg->bounce_buf = kzalloc(max_packet, flags);
seg->bounce_buf = kzalloc_node(max_packet, flags,
dev_to_node(dev));
if (!seg->bounce_buf) {
dma_pool_free(xhci->segment_pool, seg->trbs, dma);
kfree(seg);
@@ -363,8 +365,9 @@ struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
{
struct xhci_ring *ring;
int ret;
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
ring = kzalloc(sizeof *(ring), flags);
ring = kzalloc_node(sizeof(*ring), flags, dev_to_node(dev));
if (!ring)
return NULL;
@@ -458,11 +461,12 @@ struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
int type, gfp_t flags)
{
struct xhci_container_ctx *ctx;
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
if ((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT))
return NULL;
ctx = kzalloc(sizeof(*ctx), flags);
ctx = kzalloc_node(sizeof(*ctx), flags, dev_to_node(dev));
if (!ctx)
return NULL;
@@ -615,6 +619,7 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
struct xhci_ring *cur_ring;
u64 addr;
int ret;
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
xhci_dbg(xhci, "Allocating %u streams and %u "
"stream context array entries.\n",
@@ -625,7 +630,8 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
}
xhci->cmd_ring_reserved_trbs++;
stream_info = kzalloc(sizeof(struct xhci_stream_info), mem_flags);
stream_info = kzalloc_node(sizeof(*stream_info), mem_flags,
dev_to_node(dev));
if (!stream_info)
goto cleanup_trbs;
@@ -633,9 +639,9 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
stream_info->num_stream_ctxs = num_stream_ctxs;
/* Initialize the array of virtual pointers to stream rings. */
stream_info->stream_rings = kzalloc(
sizeof(struct xhci_ring *)*num_streams,
mem_flags);
stream_info->stream_rings = kcalloc_node(
num_streams, sizeof(struct xhci_ring *), mem_flags,
dev_to_node(dev));
if (!stream_info->stream_rings)
goto cleanup_info;
@@ -831,6 +837,7 @@ int xhci_alloc_tt_info(struct xhci_hcd *xhci,
struct xhci_tt_bw_info *tt_info;
unsigned int num_ports;
int i, j;
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
if (!tt->multi)
num_ports = 1;
@@ -840,7 +847,8 @@ int xhci_alloc_tt_info(struct xhci_hcd *xhci,
for (i = 0; i < num_ports; i++, tt_info++) {
struct xhci_interval_bw_table *bw_table;
tt_info = kzalloc(sizeof(*tt_info), mem_flags);
tt_info = kzalloc_node(sizeof(*tt_info), mem_flags,
dev_to_node(dev));
if (!tt_info)
goto free_tts;
INIT_LIST_HEAD(&tt_info->tt_list);
@@ -1054,8 +1062,7 @@ void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
/*
* The xHCI roothub may have ports of differing speeds in any order in the port
* status registers. xhci->port_array provides an array of the port speed for
* each offset into the port status registers.
* status registers.
*
* The xHCI hardware wants to know the roothub port number that the USB device
* is attached to (or the roothub port its ancestor hub is attached to). All we
@@ -1642,7 +1649,8 @@ static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
if (!num_sp)
return 0;
xhci->scratchpad = kzalloc(sizeof(*xhci->scratchpad), flags);
xhci->scratchpad = kzalloc_node(sizeof(*xhci->scratchpad), flags,
dev_to_node(dev));
if (!xhci->scratchpad)
goto fail_sp;
@@ -1652,7 +1660,8 @@ static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
if (!xhci->scratchpad->sp_array)
goto fail_sp2;
xhci->scratchpad->sp_buffers = kzalloc(sizeof(void *) * num_sp, flags);
xhci->scratchpad->sp_buffers = kcalloc_node(num_sp, sizeof(void *),
flags, dev_to_node(dev));
if (!xhci->scratchpad->sp_buffers)
goto fail_sp3;
@@ -1720,14 +1729,16 @@ struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
bool allocate_completion, gfp_t mem_flags)
{
struct xhci_command *command;
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
command = kzalloc(sizeof(*command), mem_flags);
command = kzalloc_node(sizeof(*command), mem_flags, dev_to_node(dev));
if (!command)
return NULL;
if (allocate_completion) {
command->completion =
kzalloc(sizeof(struct completion), mem_flags);
kzalloc_node(sizeof(struct completion), mem_flags,
dev_to_node(dev));
if (!command->completion) {
kfree(command);
return NULL;
@@ -1890,18 +1901,18 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
no_bw:
xhci->cmd_ring_reserved_trbs = 0;
xhci->num_usb2_ports = 0;
xhci->num_usb3_ports = 0;
xhci->usb2_rhub.num_ports = 0;
xhci->usb3_rhub.num_ports = 0;
xhci->num_active_eps = 0;
kfree(xhci->usb2_ports);
kfree(xhci->usb3_ports);
kfree(xhci->port_array);
kfree(xhci->usb2_rhub.ports);
kfree(xhci->usb3_rhub.ports);
kfree(xhci->hw_ports);
kfree(xhci->rh_bw);
kfree(xhci->ext_caps);
xhci->usb2_ports = NULL;
xhci->usb3_ports = NULL;
xhci->port_array = NULL;
xhci->usb2_rhub.ports = NULL;
xhci->usb3_rhub.ports = NULL;
xhci->hw_ports = NULL;
xhci->rh_bw = NULL;
xhci->ext_caps = NULL;
@@ -2100,6 +2111,7 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
int i;
u8 major_revision, minor_revision;
struct xhci_hub *rhub;
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
temp = readl(addr);
major_revision = XHCI_EXT_PORT_MAJOR(temp);
@@ -2136,8 +2148,8 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
rhub->psi_count = XHCI_EXT_PORT_PSIC(temp);
if (rhub->psi_count) {
rhub->psi = kcalloc(rhub->psi_count, sizeof(*rhub->psi),
GFP_KERNEL);
rhub->psi = kcalloc_node(rhub->psi_count, sizeof(*rhub->psi),
GFP_KERNEL, dev_to_node(dev));
if (!rhub->psi)
rhub->psi_count = 0;
@@ -2186,36 +2198,53 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
port_offset--;
for (i = port_offset; i < (port_offset + port_count); i++) {
struct xhci_port *hw_port = &xhci->hw_ports[i];
/* Duplicate entry. Ignore the port if the revisions differ. */
if (xhci->port_array[i] != 0) {
if (hw_port->rhub) {
xhci_warn(xhci, "Duplicate port entry, Ext Cap %p,"
" port %u\n", addr, i);
xhci_warn(xhci, "Port was marked as USB %u, "
"duplicated as USB %u\n",
xhci->port_array[i], major_revision);
hw_port->rhub->maj_rev, major_revision);
/* Only adjust the roothub port counts if we haven't
* found a similar duplicate.
*/
if (xhci->port_array[i] != major_revision &&
xhci->port_array[i] != DUPLICATE_ENTRY) {
if (xhci->port_array[i] == 0x03)
xhci->num_usb3_ports--;
else
xhci->num_usb2_ports--;
xhci->port_array[i] = DUPLICATE_ENTRY;
if (hw_port->rhub != rhub &&
hw_port->hcd_portnum != DUPLICATE_ENTRY) {
hw_port->rhub->num_ports--;
hw_port->hcd_portnum = DUPLICATE_ENTRY;
}
/* FIXME: Should we disable the port? */
continue;
}
xhci->port_array[i] = major_revision;
if (major_revision == 0x03)
xhci->num_usb3_ports++;
else
xhci->num_usb2_ports++;
hw_port->rhub = rhub;
rhub->num_ports++;
}
/* FIXME: Should we disable ports not in the Extended Capabilities? */
}
static void xhci_create_rhub_port_array(struct xhci_hcd *xhci,
struct xhci_hub *rhub, gfp_t flags)
{
int port_index = 0;
int i;
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
if (!rhub->num_ports)
return;
rhub->ports = kcalloc_node(rhub->num_ports, sizeof(rhub->ports), flags,
dev_to_node(dev));
for (i = 0; i < HCS_MAX_PORTS(xhci->hcs_params1); i++) {
if (xhci->hw_ports[i].rhub != rhub ||
xhci->hw_ports[i].hcd_portnum == DUPLICATE_ENTRY)
continue;
xhci->hw_ports[i].hcd_portnum = port_index;
rhub->ports[port_index] = &xhci->hw_ports[i];
port_index++;
if (port_index == rhub->num_ports)
break;
}
}
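To make the two-pass scheme concrete: if the Supported Protocol capabilities declare ports 1-2 as USB 3.x and ports 3-6 as USB 2.0, xhci_add_in_port() tags hw_ports[0..1] with the USB3 rhub and hw_ports[2..5] with the USB2 rhub. xhci_create_rhub_port_array() then assigns hcd_portnum 0-1 and 0-3 respectively, filling each rhub's ports[] with pointers back into the single hw_ports[] array.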
/*
* Scan the Extended Capabilities for the "Supported Protocol Capabilities" that
* specify what speeds each port is supposed to be. We can't count on the port
@@ -2228,16 +2257,25 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
void __iomem *base;
u32 offset;
unsigned int num_ports;
int i, j, port_index;
int i, j;
int cap_count = 0;
u32 cap_start;
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
xhci->port_array = kzalloc(sizeof(*xhci->port_array)*num_ports, flags);
if (!xhci->port_array)
xhci->hw_ports = kcalloc_node(num_ports, sizeof(*xhci->hw_ports),
flags, dev_to_node(dev));
if (!xhci->hw_ports)
return -ENOMEM;
xhci->rh_bw = kzalloc(sizeof(*xhci->rh_bw)*num_ports, flags);
for (i = 0; i < num_ports; i++) {
xhci->hw_ports[i].addr = &xhci->op_regs->port_status_base +
NUM_PORT_REGS * i;
xhci->hw_ports[i].hw_portnum = i;
}
xhci->rh_bw = kzalloc_node(sizeof(*xhci->rh_bw)*num_ports, flags,
dev_to_node(dev));
if (!xhci->rh_bw)
return -ENOMEM;
for (i = 0; i < num_ports; i++) {
@@ -2264,7 +2302,8 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
XHCI_EXT_CAPS_PROTOCOL);
}
xhci->ext_caps = kzalloc(sizeof(*xhci->ext_caps) * cap_count, flags);
xhci->ext_caps = kcalloc_node(cap_count, sizeof(*xhci->ext_caps),
flags, dev_to_node(dev));
if (!xhci->ext_caps)
return -ENOMEM;
@@ -2272,86 +2311,44 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
while (offset) {
xhci_add_in_port(xhci, num_ports, base + offset, cap_count);
if (xhci->num_usb2_ports + xhci->num_usb3_ports == num_ports)
if (xhci->usb2_rhub.num_ports + xhci->usb3_rhub.num_ports ==
num_ports)
break;
offset = xhci_find_next_ext_cap(base, offset,
XHCI_EXT_CAPS_PROTOCOL);
}
if (xhci->num_usb2_ports == 0 && xhci->num_usb3_ports == 0) {
if (xhci->usb2_rhub.num_ports == 0 && xhci->usb3_rhub.num_ports == 0) {
xhci_warn(xhci, "No ports on the roothubs?\n");
return -ENODEV;
}
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Found %u USB 2.0 ports and %u USB 3.0 ports.",
xhci->num_usb2_ports, xhci->num_usb3_ports);
"Found %u USB 2.0 ports and %u USB 3.0 ports.",
xhci->usb2_rhub.num_ports, xhci->usb3_rhub.num_ports);
/* Place limits on the number of roothub ports so that the hub
* descriptors aren't longer than the USB core will allocate.
*/
if (xhci->num_usb3_ports > USB_SS_MAXPORTS) {
if (xhci->usb3_rhub.num_ports > USB_SS_MAXPORTS) {
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Limiting USB 3.0 roothub ports to %u.",
USB_SS_MAXPORTS);
xhci->num_usb3_ports = USB_SS_MAXPORTS;
xhci->usb3_rhub.num_ports = USB_SS_MAXPORTS;
}
if (xhci->num_usb2_ports > USB_MAXCHILDREN) {
if (xhci->usb2_rhub.num_ports > USB_MAXCHILDREN) {
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Limiting USB 2.0 roothub ports to %u.",
USB_MAXCHILDREN);
xhci->num_usb2_ports = USB_MAXCHILDREN;
xhci->usb2_rhub.num_ports = USB_MAXCHILDREN;
}
/*
* Note we could have all USB 3.0 ports, or all USB 2.0 ports.
* Not sure how the USB core will handle a hub with no ports...
*/
if (xhci->num_usb2_ports) {
xhci->usb2_ports = kmalloc(sizeof(*xhci->usb2_ports)*
xhci->num_usb2_ports, flags);
if (!xhci->usb2_ports)
return -ENOMEM;
port_index = 0;
for (i = 0; i < num_ports; i++) {
if (xhci->port_array[i] == 0x03 ||
xhci->port_array[i] == 0 ||
xhci->port_array[i] == DUPLICATE_ENTRY)
continue;
xhci_create_rhub_port_array(xhci, &xhci->usb2_rhub, flags);
xhci_create_rhub_port_array(xhci, &xhci->usb3_rhub, flags);
xhci->usb2_ports[port_index] =
&xhci->op_regs->port_status_base +
NUM_PORT_REGS*i;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"USB 2.0 port at index %u, "
"addr = %p", i,
xhci->usb2_ports[port_index]);
port_index++;
if (port_index == xhci->num_usb2_ports)
break;
}
}
if (xhci->num_usb3_ports) {
xhci->usb3_ports = kmalloc(sizeof(*xhci->usb3_ports)*
xhci->num_usb3_ports, flags);
if (!xhci->usb3_ports)
return -ENOMEM;
port_index = 0;
for (i = 0; i < num_ports; i++)
if (xhci->port_array[i] == 0x03) {
xhci->usb3_ports[port_index] =
&xhci->op_regs->port_status_base +
NUM_PORT_REGS*i;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"USB 3.0 port at index %u, "
"addr = %p", i,
xhci->usb3_ports[port_index]);
port_index++;
if (port_index == xhci->num_usb3_ports)
break;
}
}
return 0;
}


@@ -58,7 +58,7 @@ static int get_bw_index(struct xhci_hcd *xhci, struct usb_device *udev,
bw_index = (virt_dev->real_port - 1) * 2 + 1;
} else {
/* add one more for each SS port */
bw_index = virt_dev->real_port + xhci->num_usb3_ports - 1;
bw_index = virt_dev->real_port + xhci->usb3_rhub.num_ports - 1;
}
return bw_index;
@@ -284,7 +284,7 @@ int xhci_mtk_sch_init(struct xhci_hcd_mtk *mtk)
int i;
/* ss IN and OUT are separated */
num_usb_bus = xhci->num_usb3_ports * 2 + xhci->num_usb2_ports;
num_usb_bus = xhci->usb3_rhub.num_ports * 2 + xhci->usb2_rhub.num_ports;
sch_array = kcalloc(num_usb_bus, sizeof(*sch_array), GFP_KERNEL);
if (sch_array == NULL)


@@ -196,11 +196,15 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
xhci->quirks |= XHCI_BROKEN_STREAMS;
}
if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
pdev->device == 0x0014)
pdev->device == 0x0014) {
xhci->quirks |= XHCI_TRUST_TX_LENGTH;
xhci->quirks |= XHCI_ZERO_64B_REGS;
}
if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
pdev->device == 0x0015)
pdev->device == 0x0015) {
xhci->quirks |= XHCI_RESET_ON_RESUME;
xhci->quirks |= XHCI_ZERO_64B_REGS;
}
if (pdev->vendor == PCI_VENDOR_ID_VIA)
xhci->quirks |= XHCI_RESET_ON_RESUME;
@@ -284,13 +288,6 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
driver = (struct hc_driver *)id->driver_data;
/* For some HW implementation, a XHCI reset is just not enough... */
if (usb_xhci_needs_pci_reset(dev)) {
dev_info(&dev->dev, "Resetting\n");
if (pci_reset_function_locked(dev))
dev_warn(&dev->dev, "Reset failed");
}
/* Prevent runtime suspending between USB-2 and USB-3 initialization */
pm_runtime_get_noresume(&dev->dev);


@@ -1497,44 +1497,6 @@ static void handle_vendor_event(struct xhci_hcd *xhci,
handle_cmd_completion(xhci, &event->event_cmd);
}
/* @port_id: the one-based port ID from the hardware (indexed from array of all
* port registers -- USB 3.0 and USB 2.0).
*
* Returns a zero-based port number, which is suitable for indexing into each of
* the split roothubs' port arrays and bus state arrays.
* Add one to it in order to call xhci_find_slot_id_by_port.
*/
static unsigned int find_faked_portnum_from_hw_portnum(struct usb_hcd *hcd,
struct xhci_hcd *xhci, u32 port_id)
{
unsigned int i;
unsigned int num_similar_speed_ports = 0;
/* port_id from the hardware is 1-based, but port_array[], usb3_ports[],
* and usb2_ports are 0-based indexes. Count the number of similar
* speed ports, up to 1 port before this port.
*/
for (i = 0; i < (port_id - 1); i++) {
u8 port_speed = xhci->port_array[i];
/*
* Skip ports that don't have known speeds, or have duplicate
* Extended Capabilities port speed entries.
*/
if (port_speed == 0 || port_speed == DUPLICATE_ENTRY)
continue;
/*
* USB 3.0 ports are always under a USB 3.0 hub. USB 2.0 and
* 1.1 ports are under the USB 2.0 hub. If the port speed
* matches the device speed, it's a similar speed port.
*/
if ((port_speed == 0x03) == (hcd->speed >= HCD_USB3))
num_similar_speed_ports++;
}
return num_similar_speed_ports;
}
static void handle_device_notification(struct xhci_hcd *xhci,
union xhci_trb *event)
{
@@ -1563,11 +1525,10 @@ static void handle_port_status(struct xhci_hcd *xhci,
u32 portsc, cmd_reg;
int max_ports;
int slot_id;
unsigned int faked_port_index;
u8 major_revision;
unsigned int hcd_portnum;
struct xhci_bus_state *bus_state;
__le32 __iomem **port_array;
bool bogus_port_status = false;
struct xhci_port *port;
/* Port status change events always have a successful completion code */
if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS)
@@ -1584,49 +1545,19 @@ static void handle_port_status(struct xhci_hcd *xhci,
return;
}
/* Figure out which usb_hcd this port is attached to:
* is it a USB 3.0 port or a USB 2.0/1.1 port?
*/
major_revision = xhci->port_array[port_id - 1];
/* Find the right roothub. */
hcd = xhci_to_hcd(xhci);
if ((major_revision == 0x03) != (hcd->speed >= HCD_USB3))
hcd = xhci->shared_hcd;
if (major_revision == 0) {
xhci_warn(xhci, "Event for port %u not in "
"Extended Capabilities, ignoring.\n",
port_id);
bogus_port_status = true;
goto cleanup;
}
if (major_revision == DUPLICATE_ENTRY) {
xhci_warn(xhci, "Event for port %u duplicated in"
"Extended Capabilities, ignoring.\n",
port_id);
port = &xhci->hw_ports[port_id - 1];
if (!port || !port->rhub || port->hcd_portnum == DUPLICATE_ENTRY) {
xhci_warn(xhci, "Event for invalid port %u\n", port_id);
bogus_port_status = true;
goto cleanup;
}
/*
* Hardware port IDs reported by a Port Status Change Event include USB
* 3.0 and USB 2.0 ports. We want to check if the port has reported a
* resume event, but we first need to translate the hardware port ID
* into the index into the ports on the correct split roothub, and the
* correct bus_state structure.
*/
hcd = port->rhub->hcd;
bus_state = &xhci->bus_state[hcd_index(hcd)];
if (hcd->speed >= HCD_USB3)
port_array = xhci->usb3_ports;
else
port_array = xhci->usb2_ports;
/* Find the faked port hub number */
faked_port_index = find_faked_portnum_from_hw_portnum(hcd, xhci,
port_id);
portsc = readl(port_array[faked_port_index]);
hcd_portnum = port->hcd_portnum;
portsc = readl(port->addr);
trace_xhci_handle_port_status(faked_port_index, portsc);
trace_xhci_handle_port_status(hcd_portnum, portsc);
if (hcd->state == HC_STATE_SUSPENDED) {
xhci_dbg(xhci, "resume root hub\n");
@@ -1634,7 +1565,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
}
if (hcd->speed >= HCD_USB3 && (portsc & PORT_PLS_MASK) == XDEV_INACTIVE)
bus_state->port_remote_wakeup &= ~(1 << faked_port_index);
bus_state->port_remote_wakeup &= ~(1 << hcd_portnum);
if ((portsc & PORT_PLC) && (portsc & PORT_PLS_MASK) == XDEV_RESUME) {
xhci_dbg(xhci, "port resume event for port %d\n", port_id);
@@ -1651,29 +1582,26 @@ static void handle_port_status(struct xhci_hcd *xhci,
* so we can tell the difference between the end of
* device and host initiated resume.
*/
bus_state->port_remote_wakeup |= 1 << faked_port_index;
xhci_test_and_clear_bit(xhci, port_array,
faked_port_index, PORT_PLC);
xhci_set_link_state(xhci, port_array, faked_port_index,
XDEV_U0);
bus_state->port_remote_wakeup |= 1 << hcd_portnum;
xhci_test_and_clear_bit(xhci, port, PORT_PLC);
xhci_set_link_state(xhci, port, XDEV_U0);
/* Need to wait until the next link state change
* indicates the device is actually in U0.
*/
bogus_port_status = true;
goto cleanup;
} else if (!test_bit(faked_port_index,
&bus_state->resuming_ports)) {
} else if (!test_bit(hcd_portnum, &bus_state->resuming_ports)) {
xhci_dbg(xhci, "resume HS port %d\n", port_id);
bus_state->resume_done[faked_port_index] = jiffies +
bus_state->resume_done[hcd_portnum] = jiffies +
msecs_to_jiffies(USB_RESUME_TIMEOUT);
set_bit(faked_port_index, &bus_state->resuming_ports);
set_bit(hcd_portnum, &bus_state->resuming_ports);
/* Do the rest in GetPortStatus after resume time delay.
* Avoid polling roothub status before that so that a
* USB device's auto-resume latency stays around ~40ms.
*/
set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
mod_timer(&hcd->rh_timer,
bus_state->resume_done[faked_port_index]);
bus_state->resume_done[hcd_portnum]);
bogus_port_status = true;
}
}
@@ -1688,17 +1616,14 @@ static void handle_port_status(struct xhci_hcd *xhci,
* so the roothub behavior is consistent with external
* USB 3.0 hub behavior.
*/
slot_id = xhci_find_slot_id_by_port(hcd, xhci,
faked_port_index + 1);
slot_id = xhci_find_slot_id_by_port(hcd, xhci, hcd_portnum + 1);
if (slot_id && xhci->devs[slot_id])
xhci_ring_device(xhci, slot_id);
if (bus_state->port_remote_wakeup & (1 << faked_port_index)) {
bus_state->port_remote_wakeup &=
~(1 << faked_port_index);
xhci_test_and_clear_bit(xhci, port_array,
faked_port_index, PORT_PLC);
if (bus_state->port_remote_wakeup & (1 << hcd_portnum)) {
bus_state->port_remote_wakeup &= ~(1 << hcd_portnum);
xhci_test_and_clear_bit(xhci, port, PORT_PLC);
usb_wakeup_notification(hcd->self.root_hub,
faked_port_index + 1);
hcd_portnum + 1);
bogus_port_status = true;
goto cleanup;
}
@@ -1710,16 +1635,15 @@ static void handle_port_status(struct xhci_hcd *xhci,
* out of the RExit state.
*/
if (!DEV_SUPERSPEED_ANY(portsc) &&
test_and_clear_bit(faked_port_index,
test_and_clear_bit(hcd_portnum,
&bus_state->rexit_ports)) {
complete(&bus_state->rexit_done[faked_port_index]);
complete(&bus_state->rexit_done[hcd_portnum]);
bogus_port_status = true;
goto cleanup;
}
if (hcd->speed < HCD_USB3)
xhci_test_and_clear_bit(xhci, port_array, faked_port_index,
PORT_PLC);
xhci_test_and_clear_bit(xhci, port, PORT_PLC);
cleanup:
/* Update event ring dequeue pointer before dropping the lock */
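To make the deletion of find_faked_portnum_from_hw_portnum() above concrete, here is a self-contained comparison; the speed table and helper names are illustrative only. The old path rescanned the speed array on every port event to count same-speed ports ahead of the event's port, while the new path simply reads the hcd_portnum that was computed once when the port table was built.

/* Standalone demo: per-event scan vs. precomputed per-port index.
 * Speed codes: 0x03 = USB3, 0x02 = USB2, 0x00 = unknown (skipped).
 * The table contents are made up for illustration.
 */
#include <stdio.h>

#define NUM_PORTS 5

static const unsigned char speed[NUM_PORTS] = { 0x03, 0x02, 0x00, 0x02, 0x03 };

/* Old style: count similar-speed ports before this one, on every event */
static unsigned int scan_portnum(unsigned int port_id /* 1-based */, int want_usb3)
{
	unsigned int i, n = 0;

	for (i = 0; i < port_id - 1; i++) {
		if (speed[i] == 0x00)
			continue;
		if ((speed[i] == 0x03) == want_usb3)
			n++;
	}
	return n;
}

int main(void)
{
	unsigned int hcd_portnum[NUM_PORTS];	/* new style: computed once */
	unsigned int n_usb2 = 0, n_usb3 = 0, id;

	for (id = 0; id < NUM_PORTS; id++) {
		if (speed[id] == 0x00)
			continue;
		hcd_portnum[id] = (speed[id] == 0x03) ? n_usb3++ : n_usb2++;
	}

	/* Both approaches agree; the new one is a single array read */
	for (id = 1; id <= NUM_PORTS; id++) {
		if (speed[id - 1] == 0x00)
			continue;
		printf("hw port %u: scan=%u cached=%u\n", id,
		       scan_portnum(id, speed[id - 1] == 0x03),
		       hcd_portnum[id - 1]);
	}
	return 0;
}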

View File

@@ -18,9 +18,11 @@
#include <linux/phy/tegra/xusb.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <soc/tegra/pmc.h>
#include "xhci.h"
@@ -761,6 +763,49 @@ static void tegra_xusb_phy_disable(struct tegra_xusb *tegra)
}
}
static int tegra_xusb_runtime_suspend(struct device *dev)
{
struct tegra_xusb *tegra = dev_get_drvdata(dev);
tegra_xusb_phy_disable(tegra);
regulator_bulk_disable(tegra->soc->num_supplies, tegra->supplies);
tegra_xusb_clk_disable(tegra);
return 0;
}
static int tegra_xusb_runtime_resume(struct device *dev)
{
struct tegra_xusb *tegra = dev_get_drvdata(dev);
int err;
err = tegra_xusb_clk_enable(tegra);
if (err) {
dev_err(dev, "failed to enable clocks: %d\n", err);
return err;
}
err = regulator_bulk_enable(tegra->soc->num_supplies, tegra->supplies);
if (err) {
dev_err(dev, "failed to enable regulators: %d\n", err);
goto disable_clk;
}
err = tegra_xusb_phy_enable(tegra);
if (err < 0) {
dev_err(dev, "failed to enable PHYs: %d\n", err);
goto disable_regulator;
}
return 0;
disable_regulator:
regulator_bulk_disable(tegra->soc->num_supplies, tegra->supplies);
disable_clk:
tegra_xusb_clk_disable(tegra);
return err;
}
static int tegra_xusb_load_firmware(struct tegra_xusb *tegra)
{
unsigned int code_tag_blocks, code_size_blocks, code_blocks;
@@ -930,20 +975,6 @@ static int tegra_xusb_probe(struct platform_device *pdev)
if (IS_ERR(tegra->padctl))
return PTR_ERR(tegra->padctl);
tegra->host_rst = devm_reset_control_get(&pdev->dev, "xusb_host");
if (IS_ERR(tegra->host_rst)) {
err = PTR_ERR(tegra->host_rst);
dev_err(&pdev->dev, "failed to get xusb_host reset: %d\n", err);
goto put_padctl;
}
tegra->ss_rst = devm_reset_control_get(&pdev->dev, "xusb_ss");
if (IS_ERR(tegra->ss_rst)) {
err = PTR_ERR(tegra->ss_rst);
dev_err(&pdev->dev, "failed to get xusb_ss reset: %d\n", err);
goto put_padctl;
}
tegra->host_clk = devm_clk_get(&pdev->dev, "xusb_host");
if (IS_ERR(tegra->host_clk)) {
err = PTR_ERR(tegra->host_clk);
@@ -1007,11 +1038,48 @@ static int tegra_xusb_probe(struct platform_device *pdev)
goto put_padctl;
}
if (!pdev->dev.pm_domain) {
tegra->host_rst = devm_reset_control_get(&pdev->dev,
"xusb_host");
if (IS_ERR(tegra->host_rst)) {
err = PTR_ERR(tegra->host_rst);
dev_err(&pdev->dev,
"failed to get xusb_host reset: %d\n", err);
goto put_padctl;
}
tegra->ss_rst = devm_reset_control_get(&pdev->dev, "xusb_ss");
if (IS_ERR(tegra->ss_rst)) {
err = PTR_ERR(tegra->ss_rst);
dev_err(&pdev->dev, "failed to get xusb_ss reset: %d\n",
err);
goto put_padctl;
}
err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_XUSBA,
tegra->ss_clk,
tegra->ss_rst);
if (err) {
dev_err(&pdev->dev,
"failed to enable XUSBA domain: %d\n", err);
goto put_padctl;
}
err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_XUSBC,
tegra->host_clk,
tegra->host_rst);
if (err) {
dev_err(&pdev->dev,
"failed to enable XUSBC domain: %d\n", err);
goto disable_xusba;
}
}
tegra->supplies = devm_kcalloc(&pdev->dev, tegra->soc->num_supplies,
sizeof(*tegra->supplies), GFP_KERNEL);
if (!tegra->supplies) {
err = -ENOMEM;
goto put_padctl;
goto disable_xusbc;
}
for (i = 0; i < tegra->soc->num_supplies; i++)
@@ -1021,7 +1089,7 @@ static int tegra_xusb_probe(struct platform_device *pdev)
tegra->supplies);
if (err) {
dev_err(&pdev->dev, "failed to get regulators: %d\n", err);
goto put_padctl;
goto disable_xusbc;
}
for (i = 0; i < tegra->soc->num_types; i++)
@@ -1031,7 +1099,7 @@ static int tegra_xusb_probe(struct platform_device *pdev)
sizeof(*tegra->phys), GFP_KERNEL);
if (!tegra->phys) {
err = -ENOMEM;
goto put_padctl;
goto disable_xusbc;
}
for (i = 0, k = 0; i < tegra->soc->num_types; i++) {
@@ -1047,44 +1115,18 @@ static int tegra_xusb_probe(struct platform_device *pdev)
"failed to get PHY %s: %ld\n", prop,
PTR_ERR(phy));
err = PTR_ERR(phy);
goto put_padctl;
goto disable_xusbc;
}
tegra->phys[k++] = phy;
}
}
err = tegra_xusb_clk_enable(tegra);
if (err) {
dev_err(&pdev->dev, "failed to enable clocks: %d\n", err);
goto put_padctl;
}
err = regulator_bulk_enable(tegra->soc->num_supplies, tegra->supplies);
if (err) {
dev_err(&pdev->dev, "failed to enable regulators: %d\n", err);
goto disable_clk;
}
err = tegra_xusb_phy_enable(tegra);
if (err < 0) {
dev_err(&pdev->dev, "failed to enable PHYs: %d\n", err);
goto disable_regulator;
}
tegra_xusb_ipfs_config(tegra, regs);
err = tegra_xusb_load_firmware(tegra);
if (err < 0) {
dev_err(&pdev->dev, "failed to load firmware: %d\n", err);
goto disable_phy;
}
tegra->hcd = usb_create_hcd(&tegra_xhci_hc_driver, &pdev->dev,
dev_name(&pdev->dev));
if (!tegra->hcd) {
err = -ENOMEM;
goto disable_phy;
goto disable_xusbc;
}
/*
@@ -1093,6 +1135,25 @@ static int tegra_xusb_probe(struct platform_device *pdev)
*/
platform_set_drvdata(pdev, tegra);
pm_runtime_enable(&pdev->dev);
if (pm_runtime_enabled(&pdev->dev))
err = pm_runtime_get_sync(&pdev->dev);
else
err = tegra_xusb_runtime_resume(&pdev->dev);
if (err < 0) {
dev_err(&pdev->dev, "failed to enable device: %d\n", err);
goto disable_rpm;
}
tegra_xusb_ipfs_config(tegra, regs);
err = tegra_xusb_load_firmware(tegra);
if (err < 0) {
dev_err(&pdev->dev, "failed to load firmware: %d\n", err);
goto put_rpm;
}
tegra->hcd->regs = tegra->regs;
tegra->hcd->rsrc_start = regs->start;
tegra->hcd->rsrc_len = resource_size(regs);
@@ -1100,7 +1161,7 @@ static int tegra_xusb_probe(struct platform_device *pdev)
err = usb_add_hcd(tegra->hcd, tegra->xhci_irq, IRQF_SHARED);
if (err < 0) {
dev_err(&pdev->dev, "failed to add USB HCD: %d\n", err);
goto put_usb2;
goto put_rpm;
}
device_wakeup_enable(tegra->hcd->self.controller);
@@ -1155,14 +1216,18 @@ put_usb3:
usb_put_hcd(xhci->shared_hcd);
remove_usb2:
usb_remove_hcd(tegra->hcd);
put_usb2:
put_rpm:
if (!pm_runtime_status_suspended(&pdev->dev))
tegra_xusb_runtime_suspend(&pdev->dev);
disable_rpm:
pm_runtime_disable(&pdev->dev);
usb_put_hcd(tegra->hcd);
disable_phy:
tegra_xusb_phy_disable(tegra);
disable_regulator:
regulator_bulk_disable(tegra->soc->num_supplies, tegra->supplies);
disable_clk:
tegra_xusb_clk_disable(tegra);
disable_xusbc:
if (!pdev->dev.pm_domain)
tegra_powergate_power_off(TEGRA_POWERGATE_XUSBC);
disable_xusba:
if (!pdev->dev.pm_domain)
tegra_powergate_power_off(TEGRA_POWERGATE_XUSBA);
put_padctl:
tegra_xusb_padctl_put(tegra->padctl);
return err;
@@ -1181,9 +1246,8 @@ static int tegra_xusb_remove(struct platform_device *pdev)
dma_free_coherent(&pdev->dev, tegra->fw.size, tegra->fw.virt,
tegra->fw.phys);
tegra_xusb_phy_disable(tegra);
regulator_bulk_disable(tegra->soc->num_supplies, tegra->supplies);
tegra_xusb_clk_disable(tegra);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
tegra_xusb_padctl_put(tegra->padctl);
@@ -1211,6 +1275,8 @@ static int tegra_xusb_resume(struct device *dev)
#endif
static const struct dev_pm_ops tegra_xusb_pm_ops = {
SET_RUNTIME_PM_OPS(tegra_xusb_runtime_suspend,
tegra_xusb_runtime_resume, NULL)
SET_SYSTEM_SLEEP_PM_OPS(tegra_xusb_suspend, tegra_xusb_resume)
};

View File

@@ -33,8 +33,8 @@ static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");
static unsigned int quirks;
module_param(quirks, uint, S_IRUGO);
static unsigned long long quirks;
module_param(quirks, ullong, S_IRUGO);
MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default");
/* TODO: copied from ehci-hcd.c - can this be refactored? */
@@ -209,6 +209,68 @@ int xhci_reset(struct xhci_hcd *xhci)
return ret;
}
static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
{
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
int err, i;
u64 val;
/*
* Some Renesas controllers get into a weird state if they are
* reset while programmed with 64bit addresses (they will preserve
* the top half of the address in internal, non-visible
* registers). You end up with half the address coming from the
* kernel, and the other half coming from the firmware. Also,
* changing the programming leads to extra accesses even if the
* controller is supposed to be halted. The controller ends up with
* a fatal fault, and is then ripe for being properly reset.
*
* Special care is taken to only apply this if the device is behind
* an iommu. Doing anything when there is no iommu is definitely
* unsafe...
*/
if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !dev->iommu_group)
return;
xhci_info(xhci, "Zeroing 64bit base registers, expecting fault\n");
/* Clear HSEIE so that faults do not get signaled */
val = readl(&xhci->op_regs->command);
val &= ~CMD_HSEIE;
writel(val, &xhci->op_regs->command);
/* Clear HSE (aka FATAL) */
val = readl(&xhci->op_regs->status);
val |= STS_FATAL;
writel(val, &xhci->op_regs->status);
/* Now zero the registers, and brace for impact */
val = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
if (upper_32_bits(val))
xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
if (upper_32_bits(val))
xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);
for (i = 0; i < HCS_MAX_INTRS(xhci->hcs_params1); i++) {
struct xhci_intr_reg __iomem *ir;
ir = &xhci->run_regs->ir_set[i];
val = xhci_read_64(xhci, &ir->erst_base);
if (upper_32_bits(val))
xhci_write_64(xhci, 0, &ir->erst_base);
val = xhci_read_64(xhci, &ir->erst_dequeue);
if (upper_32_bits(val))
xhci_write_64(xhci, 0, &ir->erst_dequeue);
}
/* Wait for the fault to appear. It will be cleared on reset */
err = xhci_handshake(&xhci->op_regs->status,
STS_FATAL, STS_FATAL,
XHCI_MAX_HALT_USEC);
if (!err)
xhci_info(xhci, "Fault detected\n");
}
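A tiny standalone illustration of the decision the loop above makes; upper_32_bits_demo() mimics the kernel's upper_32_bits() helper and the register values are invented. A 64-bit pointer register is rewritten only when its top half is non-zero, because that stale top half is exactly what the affected controllers carry across reset.

/* Minimal model of the "zero only if the top half is set" check.
 * Register values here are arbitrary examples.
 */
#include <stdint.h>
#include <stdio.h>

static inline uint32_t upper_32_bits_demo(uint64_t v)
{
	return (uint32_t)(v >> 32);
}

int main(void)
{
	uint64_t regs[] = {
		0x00000000fffff000ULL,	/* fits in 32 bits: left alone  */
		0x0000000100000000ULL,	/* top half set: must be zeroed */
	};
	size_t i;

	for (i = 0; i < sizeof(regs) / sizeof(regs[0]); i++) {
		if (upper_32_bits_demo(regs[i]))
			regs[i] = 0;
		printf("reg %zu -> %#llx\n", i, (unsigned long long)regs[i]);
	}
	return 0;
}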
#ifdef CONFIG_USB_PCI
/*
@@ -400,13 +462,15 @@ static void compliance_mode_recovery(struct timer_list *t)
{
struct xhci_hcd *xhci;
struct usb_hcd *hcd;
struct xhci_hub *rhub;
u32 temp;
int i;
xhci = from_timer(xhci, t, comp_mode_recovery_timer);
rhub = &xhci->usb3_rhub;
for (i = 0; i < xhci->num_usb3_ports; i++) {
temp = readl(xhci->usb3_ports[i]);
for (i = 0; i < rhub->num_ports; i++) {
temp = readl(rhub->ports[i]->addr);
if ((temp & PORT_PLS_MASK) == USB_SS_PORT_LS_COMP_MOD) {
/*
* Compliance Mode Detected. Letting USB Core
@@ -426,7 +490,7 @@ static void compliance_mode_recovery(struct timer_list *t)
}
}
if (xhci->port_status_u0 != ((1 << xhci->num_usb3_ports)-1))
if (xhci->port_status_u0 != ((1 << rhub->num_ports) - 1))
mod_timer(&xhci->comp_mode_recovery_timer,
jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
}
@@ -483,7 +547,7 @@ static bool xhci_compliance_mode_recovery_timer_quirk_check(void)
static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)
{
return (xhci->port_status_u0 == ((1 << xhci->num_usb3_ports)-1));
return (xhci->port_status_u0 == ((1 << xhci->usb3_rhub.num_ports) - 1));
}
@@ -812,33 +876,33 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci)
static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci)
{
struct xhci_port **ports;
int port_index;
__le32 __iomem **port_array;
unsigned long flags;
u32 t1, t2;
spin_lock_irqsave(&xhci->lock, flags);
/* disable usb3 ports Wake bits */
port_index = xhci->num_usb3_ports;
port_array = xhci->usb3_ports;
port_index = xhci->usb3_rhub.num_ports;
ports = xhci->usb3_rhub.ports;
while (port_index--) {
t1 = readl(port_array[port_index]);
t1 = readl(ports[port_index]->addr);
t1 = xhci_port_state_to_neutral(t1);
t2 = t1 & ~PORT_WAKE_BITS;
if (t1 != t2)
writel(t2, port_array[port_index]);
writel(t2, ports[port_index]->addr);
}
/* disable usb2 ports Wake bits */
port_index = xhci->num_usb2_ports;
port_array = xhci->usb2_ports;
port_index = xhci->usb2_rhub.num_ports;
ports = xhci->usb2_rhub.ports;
while (port_index--) {
t1 = readl(port_array[port_index]);
t1 = readl(ports[port_index]->addr);
t1 = xhci_port_state_to_neutral(t1);
t2 = t1 & ~PORT_WAKE_BITS;
if (t1 != t2)
writel(t2, port_array[port_index]);
writel(t2, ports[port_index]->addr);
}
spin_unlock_irqrestore(&xhci->lock, flags);
@@ -1004,6 +1068,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
xhci_dbg(xhci, "Stop HCD\n");
xhci_halt(xhci);
xhci_zero_64b_regs(xhci);
xhci_reset(xhci);
spin_unlock_irq(&xhci->lock);
xhci_cleanup_msix(xhci);
@@ -3976,18 +4041,10 @@ static int xhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev)
*/
int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
__le32 __iomem *base_addr = &xhci->op_regs->port_status_base;
__le32 __iomem *addr;
int raw_port;
struct xhci_hub *rhub;
if (hcd->speed < HCD_USB3)
addr = xhci->usb2_ports[port1 - 1];
else
addr = xhci->usb3_ports[port1 - 1];
raw_port = (addr - base_addr)/NUM_PORT_REGS + 1;
return raw_port;
rhub = xhci_get_rhub(hcd);
return rhub->ports[port1 - 1]->hw_portnum + 1;
}
/*
@@ -4120,7 +4177,7 @@ static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
struct usb_device *udev, int enable)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
__le32 __iomem **port_array;
struct xhci_port **ports;
__le32 __iomem *pm_addr, *hlpm_addr;
u32 pm_val, hlpm_val, field;
unsigned int port_num;
@@ -4141,11 +4198,11 @@ static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
spin_lock_irqsave(&xhci->lock, flags);
port_array = xhci->usb2_ports;
ports = xhci->usb2_rhub.ports;
port_num = udev->portnum - 1;
pm_addr = port_array[port_num] + PORTPMSC;
pm_addr = ports[port_num]->addr + PORTPMSC;
pm_val = readl(pm_addr);
hlpm_addr = port_array[port_num] + PORTHLPMC;
hlpm_addr = ports[port_num]->addr + PORTHLPMC;
field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
@@ -4858,6 +4915,7 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
if (usb_hcd_is_primary_hcd(hcd)) {
xhci->main_hcd = hcd;
xhci->usb2_rhub.hcd = hcd;
/* Mark the first roothub as being USB 2.0.
* The xHCI driver will register the USB 3.0 roothub.
*/
@@ -4883,6 +4941,7 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
minor_rev,
minor_rev ? "Enhanced" : "");
xhci->usb3_rhub.hcd = hcd;
/* xHCI private pointer was set in xhci_pci_probe for the second
* registered roothub.
*/
@@ -4921,6 +4980,8 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
if (retval)
return retval;
xhci_zero_64b_regs(xhci);
xhci_dbg(xhci, "Resetting HCD\n");
/* Reset the internal HC memory state and registers. */
retval = xhci_reset(xhci);
@@ -4963,7 +5024,7 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
return retval;
xhci_dbg(xhci, "Called HCD init\n");
xhci_info(xhci, "hcc params 0x%08x hci version 0x%x quirks 0x%08x\n",
xhci_info(xhci, "hcc params 0x%08x hci version 0x%x quirks 0x%016llx\n",
xhci->hcc_params, xhci->hci_version, xhci->quirks);
return 0;

View File

@@ -1683,13 +1683,23 @@ static inline unsigned int hcd_index(struct usb_hcd *hcd)
else
return 1;
}
struct xhci_port {
__le32 __iomem *addr;
int hw_portnum;
int hcd_portnum;
struct xhci_hub *rhub;
};
struct xhci_hub {
u8 maj_rev;
u8 min_rev;
u32 *psi; /* array of protocol speed ID entries */
u8 psi_count;
u8 psi_uid_count;
struct xhci_port **ports;
unsigned int num_ports;
struct usb_hcd *hcd;
/* supported protocol extended capability values */
u8 maj_rev;
u8 min_rev;
u32 *psi; /* array of protocol speed ID entries */
u8 psi_count;
u8 psi_uid_count;
};
/* There is one xhci_hcd structure per controller */
@@ -1787,12 +1797,12 @@ struct xhci_hcd {
#define XHCI_STATE_DYING (1 << 0)
#define XHCI_STATE_HALTED (1 << 1)
#define XHCI_STATE_REMOVING (1 << 2)
unsigned int quirks;
#define XHCI_LINK_TRB_QUIRK (1 << 0)
#define XHCI_RESET_EP_QUIRK (1 << 1)
#define XHCI_NEC_HOST (1 << 2)
#define XHCI_AMD_PLL_FIX (1 << 3)
#define XHCI_SPURIOUS_SUCCESS (1 << 4)
unsigned long long quirks;
#define XHCI_LINK_TRB_QUIRK BIT_ULL(0)
#define XHCI_RESET_EP_QUIRK BIT_ULL(1)
#define XHCI_NEC_HOST BIT_ULL(2)
#define XHCI_AMD_PLL_FIX BIT_ULL(3)
#define XHCI_SPURIOUS_SUCCESS BIT_ULL(4)
/*
* Certain Intel host controllers have a limit to the number of endpoint
* contexts they can handle. Ideally, they would signal that they can't handle
@@ -1802,50 +1812,44 @@ struct xhci_hcd {
* commands, reset device commands, disable slot commands, and address device
* commands.
*/
#define XHCI_EP_LIMIT_QUIRK (1 << 5)
#define XHCI_BROKEN_MSI (1 << 6)
#define XHCI_RESET_ON_RESUME (1 << 7)
#define XHCI_SW_BW_CHECKING (1 << 8)
#define XHCI_AMD_0x96_HOST (1 << 9)
#define XHCI_TRUST_TX_LENGTH (1 << 10)
#define XHCI_LPM_SUPPORT (1 << 11)
#define XHCI_INTEL_HOST (1 << 12)
#define XHCI_SPURIOUS_REBOOT (1 << 13)
#define XHCI_COMP_MODE_QUIRK (1 << 14)
#define XHCI_AVOID_BEI (1 << 15)
#define XHCI_PLAT (1 << 16)
#define XHCI_SLOW_SUSPEND (1 << 17)
#define XHCI_SPURIOUS_WAKEUP (1 << 18)
#define XHCI_EP_LIMIT_QUIRK BIT_ULL(5)
#define XHCI_BROKEN_MSI BIT_ULL(6)
#define XHCI_RESET_ON_RESUME BIT_ULL(7)
#define XHCI_SW_BW_CHECKING BIT_ULL(8)
#define XHCI_AMD_0x96_HOST BIT_ULL(9)
#define XHCI_TRUST_TX_LENGTH BIT_ULL(10)
#define XHCI_LPM_SUPPORT BIT_ULL(11)
#define XHCI_INTEL_HOST BIT_ULL(12)
#define XHCI_SPURIOUS_REBOOT BIT_ULL(13)
#define XHCI_COMP_MODE_QUIRK BIT_ULL(14)
#define XHCI_AVOID_BEI BIT_ULL(15)
#define XHCI_PLAT BIT_ULL(16)
#define XHCI_SLOW_SUSPEND BIT_ULL(17)
#define XHCI_SPURIOUS_WAKEUP BIT_ULL(18)
/* For controllers with a broken beyond repair streams implementation */
#define XHCI_BROKEN_STREAMS (1 << 19)
#define XHCI_PME_STUCK_QUIRK (1 << 20)
#define XHCI_MTK_HOST (1 << 21)
#define XHCI_SSIC_PORT_UNUSED (1 << 22)
#define XHCI_NO_64BIT_SUPPORT (1 << 23)
#define XHCI_MISSING_CAS (1 << 24)
#define XHCI_BROKEN_STREAMS BIT_ULL(19)
#define XHCI_PME_STUCK_QUIRK BIT_ULL(20)
#define XHCI_MTK_HOST BIT_ULL(21)
#define XHCI_SSIC_PORT_UNUSED BIT_ULL(22)
#define XHCI_NO_64BIT_SUPPORT BIT_ULL(23)
#define XHCI_MISSING_CAS BIT_ULL(24)
/* For controller with a broken Port Disable implementation */
#define XHCI_BROKEN_PORT_PED (1 << 25)
#define XHCI_LIMIT_ENDPOINT_INTERVAL_7 (1 << 26)
#define XHCI_U2_DISABLE_WAKE (1 << 27)
#define XHCI_ASMEDIA_MODIFY_FLOWCONTROL (1 << 28)
#define XHCI_HW_LPM_DISABLE (1 << 29)
#define XHCI_SUSPEND_DELAY (1 << 30)
#define XHCI_INTEL_USB_ROLE_SW (1 << 31)
#define XHCI_BROKEN_PORT_PED BIT_ULL(25)
#define XHCI_LIMIT_ENDPOINT_INTERVAL_7 BIT_ULL(26)
#define XHCI_U2_DISABLE_WAKE BIT_ULL(27)
#define XHCI_ASMEDIA_MODIFY_FLOWCONTROL BIT_ULL(28)
#define XHCI_HW_LPM_DISABLE BIT_ULL(29)
#define XHCI_SUSPEND_DELAY BIT_ULL(30)
#define XHCI_INTEL_USB_ROLE_SW BIT_ULL(31)
#define XHCI_ZERO_64B_REGS BIT_ULL(32)
unsigned int num_active_eps;
unsigned int limit_active_eps;
/* There are two roothubs to keep track of bus suspend info for */
struct xhci_bus_state bus_state[2];
/* Is each xHCI roothub port a USB 3.0, USB 2.0, or USB 1.1 port? */
u8 *port_array;
/* Array of pointers to USB 3.0 PORTSC registers */
__le32 __iomem **usb3_ports;
unsigned int num_usb3_ports;
/* Array of pointers to USB 2.0 PORTSC registers */
__le32 __iomem **usb2_ports;
struct xhci_port *hw_ports;
struct xhci_hub usb2_rhub;
struct xhci_hub usb3_rhub;
unsigned int num_usb2_ports;
/* support xHCI 0.96 spec USB2 software LPM */
unsigned sw_lpm_support:1;
/* support xHCI 1.0 spec USB2 hardware LPM */
@@ -2091,14 +2095,16 @@ void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring);
unsigned int count_trbs(u64 addr, u64 len);
/* xHCI roothub code */
void xhci_set_link_state(struct xhci_hcd *xhci, __le32 __iomem **port_array,
int port_id, u32 link_state);
void xhci_test_and_clear_bit(struct xhci_hcd *xhci, __le32 __iomem **port_array,
int port_id, u32 port_bit);
void xhci_set_link_state(struct xhci_hcd *xhci, struct xhci_port *port,
u32 link_state);
void xhci_test_and_clear_bit(struct xhci_hcd *xhci, struct xhci_port *port,
u32 port_bit);
int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,
char *buf, u16 wLength);
int xhci_hub_status_data(struct usb_hcd *hcd, char *buf);
int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1);
struct xhci_hub *xhci_get_rhub(struct usb_hcd *hcd);
void xhci_hc_died(struct xhci_hcd *xhci);
#ifdef CONFIG_PM
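The move from unsigned int to unsigned long long is what makes bit 32 usable at all, which is why XHCI_ZERO_64B_REGS forced the change. A short standalone check (the flag values mirror the defines above; everything else is illustrative) shows both the new 64-bit flag in action and what silently happens if the field is truncated back to 32 bits.

/* Why the quirks field had to grow: bit 32 does not exist in a 32-bit
 * flag word. BIT_ULL() is modeled here with a plain shift of 1ULL.
 */
#include <stdio.h>

#define BIT_ULL_DEMO(n)		(1ULL << (n))
#define XHCI_RESET_ON_RESUME	BIT_ULL_DEMO(7)
#define XHCI_ZERO_64B_REGS	BIT_ULL_DEMO(32)	/* needs 64-bit storage */

int main(void)
{
	unsigned long long quirks = 0;

	/* what xhci_pci_quirks() does for the Renesas 0x0015 part */
	quirks |= XHCI_RESET_ON_RESUME;
	quirks |= XHCI_ZERO_64B_REGS;

	printf("quirks = 0x%016llx\n", quirks);
	printf("zero-64b quirk set: %s\n",
	       (quirks & XHCI_ZERO_64B_REGS) ? "yes" : "no");

	/* with a 32-bit field, bit 32 would simply be truncated away */
	unsigned int old_style = (unsigned int)quirks;
	printf("after truncation to 32 bits: %s\n",
	       ((unsigned long long)old_style & XHCI_ZERO_64B_REGS) ? "yes" : "no");
	return 0;
}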

View File

@@ -31,7 +31,7 @@ static void isp1760_init_core(struct isp1760_device *isp)
/* Low-level chip reset */
if (isp->rst_gpio) {
gpiod_set_value_cansleep(isp->rst_gpio, 1);
mdelay(50);
msleep(50);
gpiod_set_value_cansleep(isp->rst_gpio, 0);
}

Some files were not shown because too many files have changed in this diff.