
Make sure to release any irq only after the class device has been
deregistered.

This avoids a race between gpiod_unexport and edge_store, where an irq
could be allocated just before the gpio class device is deregistered,
without relying on FLAG_EXPORT and the global sysfs lock.

Note that there is no need to hold the sysfs lock when releasing the
irq after the class device is gone, as kernfs will prevent further
attribute operations.

Signed-off-by: Johan Hovold <johan@kernel.org>
Reviewed-by: Alexandre Courbot <acourbot@nvidia.com>
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
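In outline, the teardown ordering this change establishes (a simplified sketch
of gpiod_unexport() as it appears in the file below) is:

    device_unregister(dev);
    /* kernfs now rejects further attribute writes such as edge_store() */
    gpio_setup_irq(desc, dev, 0);	/* release any irq requested via "edge" */
    put_device(dev);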
#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/sysfs.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
#include <linux/kdev_t.h>

#include "gpiolib.h"

static DEFINE_IDR(dirent_idr);


/* lock protects against gpiod_unexport() being called while
 * sysfs files are active.
 */
static DEFINE_MUTEX(sysfs_lock);

/*
 * /sys/class/gpio/gpioN... only for GPIOs that are exported
 *   /direction
 *      * MAY BE OMITTED if kernel won't allow direction changes
 *      * is read/write as "in" or "out"
 *      * may also be written as "high" or "low", initializing
 *        output value as specified ("out" implies "low")
 *   /value
 *      * always readable, subject to hardware behavior
 *      * may be writable, as zero/nonzero
 *   /edge
 *      * configures behavior of poll(2) on /value
 *      * available only if pin can generate IRQs on input
 *      * is read/write as "none", "falling", "rising", or "both"
 *   /active_low
 *      * configures polarity of /value
 *      * is read/write as zero/nonzero
 *      * also affects existing and subsequent "falling" and "rising"
 *        /edge configuration
 */
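/*
 * Illustrative userspace usage (not part of this file): once a GPIO is
 * exported and its "edge" attribute is set, poll(2) on the "value"
 * attribute wakes up with POLLPRI on each detected edge.  Roughly:
 *
 *	int fd = open("/sys/class/gpio/gpio23/value", O_RDONLY);
 *	char buf[2];
 *	struct pollfd pfd = { .fd = fd, .events = POLLPRI | POLLERR };
 *
 *	read(fd, buf, sizeof(buf));		/- clear the initial state -/
 *	while (poll(&pfd, 1, -1) > 0) {
 *		lseek(fd, 0, SEEK_SET);
 *		read(fd, buf, sizeof(buf));	/- value after the edge -/
 *	}
 *
 * ("gpio23" is only an example name; any exported GPIO works the same way.)
 */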

static ssize_t direction_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct gpio_desc *desc = dev_get_drvdata(dev);
	ssize_t status;

	mutex_lock(&sysfs_lock);

	if (!test_bit(FLAG_EXPORT, &desc->flags)) {
		status = -EIO;
	} else {
		gpiod_get_direction(desc);
		status = sprintf(buf, "%s\n",
			test_bit(FLAG_IS_OUT, &desc->flags)
				? "out" : "in");
	}

	mutex_unlock(&sysfs_lock);
	return status;
}

static ssize_t direction_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	struct gpio_desc *desc = dev_get_drvdata(dev);
	ssize_t status;

	mutex_lock(&sysfs_lock);

	if (!test_bit(FLAG_EXPORT, &desc->flags))
		status = -EIO;
	else if (sysfs_streq(buf, "high"))
		status = gpiod_direction_output_raw(desc, 1);
	else if (sysfs_streq(buf, "out") || sysfs_streq(buf, "low"))
		status = gpiod_direction_output_raw(desc, 0);
	else if (sysfs_streq(buf, "in"))
		status = gpiod_direction_input(desc);
	else
		status = -EINVAL;

	mutex_unlock(&sysfs_lock);
	return status ? : size;
}
static DEVICE_ATTR_RW(direction);

static ssize_t value_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct gpio_desc *desc = dev_get_drvdata(dev);
	ssize_t status;

	mutex_lock(&sysfs_lock);

	if (!test_bit(FLAG_EXPORT, &desc->flags))
		status = -EIO;
	else
		status = sprintf(buf, "%d\n", gpiod_get_value_cansleep(desc));

	mutex_unlock(&sysfs_lock);
	return status;
}

static ssize_t value_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	struct gpio_desc *desc = dev_get_drvdata(dev);
	ssize_t status;

	mutex_lock(&sysfs_lock);

	if (!test_bit(FLAG_EXPORT, &desc->flags))
		status = -EIO;
	else if (!test_bit(FLAG_IS_OUT, &desc->flags))
		status = -EPERM;
	else {
		long value;

		status = kstrtol(buf, 0, &value);
		if (status == 0) {
			gpiod_set_value_cansleep(desc, value);
			status = size;
		}
	}

	mutex_unlock(&sysfs_lock);
	return status;
}
static DEVICE_ATTR_RW(value);

static irqreturn_t gpio_sysfs_irq(int irq, void *priv)
{
	struct kernfs_node *value_sd = priv;

	sysfs_notify_dirent(value_sd);
	return IRQ_HANDLED;
}

static int gpio_setup_irq(struct gpio_desc *desc, struct device *dev,
		unsigned long gpio_flags)
{
	struct kernfs_node *value_sd;
	unsigned long irq_flags;
	int ret, irq, id;

	if ((desc->flags & GPIO_TRIGGER_MASK) == gpio_flags)
		return 0;

	irq = gpiod_to_irq(desc);
	if (irq < 0)
		return -EIO;

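	/*
	 * The "value" attribute's kernfs node doubles as the irq cookie.
	 * Its idr id is stashed in the upper bits of desc->flags (above
	 * ID_SHIFT), so an already-requested irq can be looked up and
	 * freed here before the trigger is reconfigured.
	 */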
	id = desc->flags >> ID_SHIFT;
	value_sd = idr_find(&dirent_idr, id);
	if (value_sd)
		free_irq(irq, value_sd);

	desc->flags &= ~GPIO_TRIGGER_MASK;

	if (!gpio_flags) {
		gpiochip_unlock_as_irq(desc->chip, gpio_chip_hwgpio(desc));
		ret = 0;
		goto free_id;
	}

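	/*
	 * The requested triggers follow the logical value: on an
	 * active-low line a "rising" edge on /value corresponds to a
	 * falling physical edge, so the IRQF_TRIGGER_* flags are
	 * swapped accordingly below.
	 */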
	irq_flags = IRQF_SHARED;
	if (test_bit(FLAG_TRIG_FALL, &gpio_flags))
		irq_flags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
			IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
	if (test_bit(FLAG_TRIG_RISE, &gpio_flags))
		irq_flags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
			IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;

	if (!value_sd) {
		value_sd = sysfs_get_dirent(dev->kobj.sd, "value");
		if (!value_sd) {
			ret = -ENODEV;
			goto err_out;
		}

		ret = idr_alloc(&dirent_idr, value_sd, 1, 0, GFP_KERNEL);
		if (ret < 0)
			goto free_sd;
		id = ret;

		desc->flags &= GPIO_FLAGS_MASK;
		desc->flags |= (unsigned long)id << ID_SHIFT;

		if (desc->flags >> ID_SHIFT != id) {
			ret = -ERANGE;
			goto free_id;
		}
	}

	/*
	 * FIXME: This should be done in the irq_request_resources callback
	 * when the irq is requested, but a few drivers currently fail
	 * to do so.
	 *
	 * Remove this redundant call (along with the corresponding
	 * unlock) when those drivers have been fixed.
	 */
	ret = gpiochip_lock_as_irq(desc->chip, gpio_chip_hwgpio(desc));
	if (ret < 0)
		goto free_id;

	ret = request_any_context_irq(irq, gpio_sysfs_irq, irq_flags,
			"gpiolib", value_sd);
	if (ret < 0)
		goto err_unlock;

	desc->flags |= gpio_flags;
	return 0;

err_unlock:
	gpiochip_unlock_as_irq(desc->chip, gpio_chip_hwgpio(desc));
free_id:
	idr_remove(&dirent_idr, id);
	desc->flags &= GPIO_FLAGS_MASK;
free_sd:
	if (value_sd)
		sysfs_put(value_sd);
err_out:
	return ret;
}

static const struct {
	const char *name;
	unsigned long flags;
} trigger_types[] = {
	{ "none", 0 },
	{ "falling", BIT(FLAG_TRIG_FALL) },
	{ "rising", BIT(FLAG_TRIG_RISE) },
	{ "both", BIT(FLAG_TRIG_FALL) | BIT(FLAG_TRIG_RISE) },
};

static ssize_t edge_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	const struct gpio_desc *desc = dev_get_drvdata(dev);
	ssize_t status;

	mutex_lock(&sysfs_lock);

	if (!test_bit(FLAG_EXPORT, &desc->flags))
		status = -EIO;
	else {
		int i;

		status = 0;
		for (i = 0; i < ARRAY_SIZE(trigger_types); i++)
			if ((desc->flags & GPIO_TRIGGER_MASK)
					== trigger_types[i].flags) {
				status = sprintf(buf, "%s\n",
						trigger_types[i].name);
				break;
			}
	}

	mutex_unlock(&sysfs_lock);
	return status;
}

static ssize_t edge_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	struct gpio_desc *desc = dev_get_drvdata(dev);
	ssize_t status;
	int i;

	for (i = 0; i < ARRAY_SIZE(trigger_types); i++)
		if (sysfs_streq(trigger_types[i].name, buf))
			goto found;
	return -EINVAL;

found:
	mutex_lock(&sysfs_lock);

	if (!test_bit(FLAG_EXPORT, &desc->flags))
		status = -EIO;
	else {
		status = gpio_setup_irq(desc, dev, trigger_types[i].flags);
		if (!status)
			status = size;
	}

	mutex_unlock(&sysfs_lock);

	return status;
}
static DEVICE_ATTR_RW(edge);

static int sysfs_set_active_low(struct gpio_desc *desc, struct device *dev,
		int value)
{
	int status = 0;

	if (!!test_bit(FLAG_ACTIVE_LOW, &desc->flags) == !!value)
		return 0;

	if (value)
		set_bit(FLAG_ACTIVE_LOW, &desc->flags);
	else
		clear_bit(FLAG_ACTIVE_LOW, &desc->flags);

	/* reconfigure poll(2) support if enabled on one edge only */
	if (!!test_bit(FLAG_TRIG_RISE, &desc->flags) ^
			!!test_bit(FLAG_TRIG_FALL, &desc->flags)) {
		unsigned long trigger_flags = desc->flags & GPIO_TRIGGER_MASK;

		gpio_setup_irq(desc, dev, 0);
		status = gpio_setup_irq(desc, dev, trigger_flags);
	}

	return status;
}

static ssize_t active_low_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	const struct gpio_desc *desc = dev_get_drvdata(dev);
	ssize_t status;

	mutex_lock(&sysfs_lock);

	if (!test_bit(FLAG_EXPORT, &desc->flags))
		status = -EIO;
	else
		status = sprintf(buf, "%d\n",
				!!test_bit(FLAG_ACTIVE_LOW, &desc->flags));

	mutex_unlock(&sysfs_lock);

	return status;
}

static ssize_t active_low_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	struct gpio_desc *desc = dev_get_drvdata(dev);
	ssize_t status;

	mutex_lock(&sysfs_lock);

	if (!test_bit(FLAG_EXPORT, &desc->flags)) {
		status = -EIO;
	} else {
		long value;

		status = kstrtol(buf, 0, &value);
		if (status == 0)
			status = sysfs_set_active_low(desc, dev, value != 0);
	}

	mutex_unlock(&sysfs_lock);

	return status ? : size;
}
static DEVICE_ATTR_RW(active_low);

static umode_t gpio_is_visible(struct kobject *kobj, struct attribute *attr,
		int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct gpio_desc *desc = dev_get_drvdata(dev);
	umode_t mode = attr->mode;
	bool show_direction = test_bit(FLAG_SYSFS_DIR, &desc->flags);

	if (attr == &dev_attr_direction.attr) {
		if (!show_direction)
			mode = 0;
	} else if (attr == &dev_attr_edge.attr) {
		if (gpiod_to_irq(desc) < 0)
			mode = 0;
		if (!show_direction && test_bit(FLAG_IS_OUT, &desc->flags))
			mode = 0;
	}

	return mode;
}

static struct attribute *gpio_attrs[] = {
	&dev_attr_direction.attr,
	&dev_attr_edge.attr,
	&dev_attr_value.attr,
	&dev_attr_active_low.attr,
	NULL,
};

static const struct attribute_group gpio_group = {
	.attrs = gpio_attrs,
	.is_visible = gpio_is_visible,
};

static const struct attribute_group *gpio_groups[] = {
	&gpio_group,
	NULL
};

/*
 * /sys/class/gpio/gpiochipN/
 *   /base ... matching gpio_chip.base (N)
 *   /label ... matching gpio_chip.label
 *   /ngpio ... matching gpio_chip.ngpio
 */

static ssize_t base_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	const struct gpio_chip *chip = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", chip->base);
}
static DEVICE_ATTR_RO(base);

static ssize_t label_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	const struct gpio_chip *chip = dev_get_drvdata(dev);

	return sprintf(buf, "%s\n", chip->label ? : "");
}
static DEVICE_ATTR_RO(label);

static ssize_t ngpio_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	const struct gpio_chip *chip = dev_get_drvdata(dev);

	return sprintf(buf, "%u\n", chip->ngpio);
}
static DEVICE_ATTR_RO(ngpio);

static struct attribute *gpiochip_attrs[] = {
	&dev_attr_base.attr,
	&dev_attr_label.attr,
	&dev_attr_ngpio.attr,
	NULL,
};
ATTRIBUTE_GROUPS(gpiochip);

/*
 * /sys/class/gpio/export ... write-only
 *	integer N ... number of GPIO to export (full access)
 * /sys/class/gpio/unexport ... write-only
 *	integer N ... number of GPIO to unexport
 */
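/*
 * For example (illustrative only), a GPIO can be exported and torn down
 * again from userspace with:
 *
 *	echo 23 > /sys/class/gpio/export
 *	echo 23 > /sys/class/gpio/unexport
 *
 * where 23 stands for any valid GPIO number on the system.
 */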
static ssize_t export_store(struct class *class,
		struct class_attribute *attr,
		const char *buf, size_t len)
{
	long gpio;
	struct gpio_desc *desc;
	int status;

	status = kstrtol(buf, 0, &gpio);
	if (status < 0)
		goto done;

	desc = gpio_to_desc(gpio);
	/* reject invalid GPIOs */
	if (!desc) {
		pr_warn("%s: invalid GPIO %ld\n", __func__, gpio);
		return -EINVAL;
	}

	/* No extra locking here; FLAG_SYSFS just signifies that the
	 * request and export were done on behalf of userspace, so
	 * they may be undone on its behalf too.
	 */

	status = gpiod_request(desc, "sysfs");
	if (status < 0) {
		if (status == -EPROBE_DEFER)
			status = -ENODEV;
		goto done;
	}
	status = gpiod_export(desc, true);
	if (status < 0)
		gpiod_free(desc);
	else
		set_bit(FLAG_SYSFS, &desc->flags);

done:
	if (status)
		pr_debug("%s: status %d\n", __func__, status);
	return status ? : len;
}

static ssize_t unexport_store(struct class *class,
		struct class_attribute *attr,
		const char *buf, size_t len)
{
	long gpio;
	struct gpio_desc *desc;
	int status;

	status = kstrtol(buf, 0, &gpio);
	if (status < 0)
		goto done;

	desc = gpio_to_desc(gpio);
	/* reject bogus commands (gpio_unexport ignores them) */
	if (!desc) {
		pr_warn("%s: invalid GPIO %ld\n", __func__, gpio);
		return -EINVAL;
	}

	status = -EINVAL;

	/* No extra locking here; FLAG_SYSFS just signifies that the
	 * request and export were done on behalf of userspace, so
	 * they may be undone on its behalf too.
	 */
	if (test_and_clear_bit(FLAG_SYSFS, &desc->flags)) {
		status = 0;
		gpiod_free(desc);
	}
done:
	if (status)
		pr_debug("%s: status %d\n", __func__, status);
	return status ? : len;
}

static struct class_attribute gpio_class_attrs[] = {
	__ATTR(export, 0200, NULL, export_store),
	__ATTR(unexport, 0200, NULL, unexport_store),
	__ATTR_NULL,
};

static struct class gpio_class = {
	.name = "gpio",
	.owner = THIS_MODULE,

	.class_attrs = gpio_class_attrs,
};


/**
 * gpiod_export - export a GPIO through sysfs
 * @desc: GPIO to make available, already requested
 * @direction_may_change: true if userspace may change gpio direction
 * Context: arch_initcall or later
 *
 * When drivers want to make a GPIO accessible to userspace after they
 * have requested it -- perhaps while debugging, or as part of their
 * public interface -- they may use this routine. If the GPIO can
 * change direction (some can't) and the caller allows it, userspace
 * will see a "direction" sysfs attribute which may be used to change
 * the gpio's direction. A "value" attribute will always be provided.
 *
 * Returns zero on success, else an error.
 */
int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
{
	struct gpio_chip *chip;
	unsigned long flags;
	int status;
	const char *ioname = NULL;
	struct device *dev;
	int offset;

	/* can't export until sysfs is available ... */
	if (!gpio_class.p) {
		pr_debug("%s: called too early!\n", __func__);
		return -ENOENT;
	}

	if (!desc) {
		pr_debug("%s: invalid gpio descriptor\n", __func__);
		return -EINVAL;
	}

	chip = desc->chip;

	mutex_lock(&sysfs_lock);

	/* check if chip is being removed */
	if (!chip || !chip->cdev) {
		status = -ENODEV;
		goto fail_unlock;
	}

	spin_lock_irqsave(&gpio_lock, flags);
	if (!test_bit(FLAG_REQUESTED, &desc->flags) ||
			test_bit(FLAG_EXPORT, &desc->flags)) {
		spin_unlock_irqrestore(&gpio_lock, flags);
		gpiod_dbg(desc, "%s: unavailable (requested=%d, exported=%d)\n",
				__func__,
				test_bit(FLAG_REQUESTED, &desc->flags),
				test_bit(FLAG_EXPORT, &desc->flags));
		status = -EPERM;
		goto fail_unlock;
	}

	if (chip->direction_input && chip->direction_output &&
			direction_may_change) {
		set_bit(FLAG_SYSFS_DIR, &desc->flags);
	}

	spin_unlock_irqrestore(&gpio_lock, flags);

	offset = gpio_chip_hwgpio(desc);
	if (chip->names && chip->names[offset])
		ioname = chip->names[offset];

	dev = device_create_with_groups(&gpio_class, chip->dev,
			MKDEV(0, 0), desc, gpio_groups,
			ioname ? ioname : "gpio%u",
			desc_to_gpio(desc));
	if (IS_ERR(dev)) {
		status = PTR_ERR(dev);
		goto fail_unlock;
	}

	set_bit(FLAG_EXPORT, &desc->flags);
	mutex_unlock(&sysfs_lock);
	return 0;

fail_unlock:
	mutex_unlock(&sysfs_lock);
	gpiod_dbg(desc, "%s: status %d\n", __func__, status);
	return status;
}
EXPORT_SYMBOL_GPL(gpiod_export);

static int match_export(struct device *dev, const void *data)
{
	return dev_get_drvdata(dev) == data;
}

/**
 * gpiod_export_link - create a sysfs link to an exported GPIO node
 * @dev: device under which to create symlink
 * @name: name of the symlink
 * @desc: GPIO to create symlink to, already exported
 *
 * Set up a symlink from /sys/.../dev/name to /sys/class/gpio/gpioN
 * node. Caller is responsible for unlinking.
 *
 * Returns zero on success, else an error.
 */
int gpiod_export_link(struct device *dev, const char *name,
		struct gpio_desc *desc)
{
	int status = -EINVAL;

	if (!desc) {
		pr_warn("%s: invalid GPIO\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&sysfs_lock);

	if (test_bit(FLAG_EXPORT, &desc->flags)) {
		struct device *tdev;

		tdev = class_find_device(&gpio_class, NULL, desc, match_export);
		if (tdev != NULL) {
			status = sysfs_create_link(&dev->kobj, &tdev->kobj,
					name);
			put_device(tdev);
		} else {
			status = -ENODEV;
		}
	}

	mutex_unlock(&sysfs_lock);

	if (status)
		gpiod_dbg(desc, "%s: status %d\n", __func__, status);

	return status;
}
EXPORT_SYMBOL_GPL(gpiod_export_link);

/**
 * gpiod_unexport - reverse effect of gpiod_export()
 * @desc: GPIO to make unavailable
 *
 * This is implicit on gpiod_free().
 */
void gpiod_unexport(struct gpio_desc *desc)
{
	int status = 0;
	struct device *dev = NULL;

	if (!desc) {
		pr_warn("%s: invalid GPIO\n", __func__);
		return;
	}

	mutex_lock(&sysfs_lock);

	if (test_bit(FLAG_EXPORT, &desc->flags)) {

		dev = class_find_device(&gpio_class, NULL, desc, match_export);
		if (dev) {
			clear_bit(FLAG_SYSFS_DIR, &desc->flags);
			clear_bit(FLAG_EXPORT, &desc->flags);
		} else
			status = -ENODEV;
	}

	mutex_unlock(&sysfs_lock);

	if (dev) {
		device_unregister(dev);
		/*
		 * Release irq after deregistration to prevent race with
		 * edge_store.
		 */
		gpio_setup_irq(desc, dev, 0);
		put_device(dev);
	}

	if (status)
		gpiod_dbg(desc, "%s: status %d\n", __func__, status);
}
EXPORT_SYMBOL_GPL(gpiod_unexport);

int gpiochip_sysfs_register(struct gpio_chip *chip)
{
	struct device *dev;

	/*
	 * Many systems add gpio chips for SOC support very early,
	 * before driver model support is available. In those cases we
	 * register later, in gpiolib_sysfs_init() ... here we just
	 * verify that _some_ field of gpio_class got initialized.
	 */
	if (!gpio_class.p)
		return 0;

	/* use chip->base for the ID; it's already known to be unique */
	dev = device_create_with_groups(&gpio_class, chip->dev, MKDEV(0, 0),
			chip, gpiochip_groups,
			"gpiochip%d", chip->base);
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	mutex_lock(&sysfs_lock);
	chip->cdev = dev;
	mutex_unlock(&sysfs_lock);

	return 0;
}

void gpiochip_sysfs_unregister(struct gpio_chip *chip)
{
	struct gpio_desc *desc;
	unsigned int i;

	if (!chip->cdev)
		return;

	device_unregister(chip->cdev);

	/* prevent further gpiod exports */
	mutex_lock(&sysfs_lock);
	chip->cdev = NULL;
	mutex_unlock(&sysfs_lock);

	/* unregister gpiod class devices owned by sysfs */
	for (i = 0; i < chip->ngpio; i++) {
		desc = &chip->desc[i];
		if (test_and_clear_bit(FLAG_SYSFS, &desc->flags))
			gpiod_free(desc);
	}
}

static int __init gpiolib_sysfs_init(void)
{
	int status;
	unsigned long flags;
	struct gpio_chip *chip;

	status = class_register(&gpio_class);
	if (status < 0)
		return status;

	/* Scan and register the gpio_chips which registered very
	 * early (e.g. before the class_register above was called).
	 *
	 * We run before arch_initcall() so chip->dev nodes can have been
	 * registered, and so arch_initcall() can always gpio_export().
	 */
	spin_lock_irqsave(&gpio_lock, flags);
	list_for_each_entry(chip, &gpio_chips, list) {
		if (chip->cdev)
			continue;

		/*
		 * TODO we yield gpio_lock here because
		 * gpiochip_sysfs_register() acquires a mutex. This is unsafe
		 * and needs to be fixed.
		 *
		 * Also it would be nice to use gpiochip_find() here so we
		 * can keep gpio_chips local to gpiolib.c, but the yield of
		 * gpio_lock prevents us from doing this.
		 */
		spin_unlock_irqrestore(&gpio_lock, flags);
		status = gpiochip_sysfs_register(chip);
		spin_lock_irqsave(&gpio_lock, flags);
	}
	spin_unlock_irqrestore(&gpio_lock, flags);


	return status;
}
postcore_initcall(gpiolib_sysfs_init);