rfkill: rewrite

This patch completely rewrites the rfkill core to address
the following deficiencies:

 * all rfkill drivers need to implement polling where necessary
   rather than having one central implementation

 * updating the rfkill state cannot be done from arbitrary
   contexts, forcing drivers to use schedule_work and requiring
   lots of code

 * rfkill drivers need to keep track of soft/hard blocked
   internally -- the core should do this

 * the rfkill API has many unexpected quirks, for example being
   asymmetric wrt. alloc/free and register/unregister

 * rfkill can call back into a driver from within a function the
   driver called -- this is prone to deadlocks and generally
   should be avoided

 * rfkill-input pointlessly is a separate module

 * drivers need to #ifdef rfkill functions (unless they want to
   depend on or select RFKILL) -- rfkill should provide inlines
   that do nothing if it isn't compiled in

 * the rfkill structure is not opaque -- drivers need to initialise
   it correctly (lots of sanity checking code required) -- instead
   force drivers to pass the right variables to rfkill_alloc()
   (see the usage sketch after this list)

 * the documentation is hard to read because it always assumes the
   reader is completely clueless and contains way TOO MANY CAPS

 * the rfkill code needlessly uses a lot of locks and atomic
   operations in locked sections

 * the LED trigger does not actually change the LED when the radio
   state changes -- the rewrite fixes this

Tested-by: Alan Jenkins <alan-jenkins@tuffmail.co.uk>
Signed-off-by: Henrique de Moraes Holschuh <hmh@hmh.eng.br> [thinkpad]
Signed-off-by: Johannes Berg <johannes@sipsolutions.net>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
commit 19d337dff9 (parent 0f6399c4c5)
Author:    Johannes Berg
Date:      2009-06-02 13:01:37 +02:00
Committed: John W. Linville
46 changed files with 2567 additions and 3304 deletions

net/rfkill/Kconfig

@@ -10,22 +10,15 @@ menuconfig RFKILL
To compile this driver as a module, choose M here: the
module will be called rfkill.
config RFKILL_INPUT
tristate "Input layer to RF switch connector"
depends on RFKILL && INPUT
help
Say Y here if you want kernel automatically toggle state
of RF switches on and off when user presses appropriate
button or a key on the keyboard. Without this module you
need a some kind of userspace application to control
state of the switches.
To compile this driver as a module, choose M here: the
module will be called rfkill-input.
# LED trigger support
config RFKILL_LEDS
bool
depends on RFKILL && LEDS_TRIGGERS
depends on RFKILL
depends on LEDS_TRIGGERS = y || RFKILL = LEDS_TRIGGERS
default y
config RFKILL_INPUT
bool
depends on RFKILL
depends on INPUT = y || RFKILL = INPUT
default y

net/rfkill/Makefile

@@ -2,5 +2,6 @@
# Makefile for the RF switch subsystem.
#
obj-$(CONFIG_RFKILL) += rfkill.o
obj-$(CONFIG_RFKILL_INPUT) += rfkill-input.o
rfkill-y += core.o
rfkill-$(CONFIG_RFKILL_INPUT) += input.o
obj-$(CONFIG_RFKILL) += rfkill.o

net/rfkill/core.c (new file)

@@ -0,0 +1,896 @@
/*
* Copyright (C) 2006 - 2007 Ivo van Doorn
* Copyright (C) 2007 Dmitry Torokhov
* Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the
* Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/capability.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rfkill.h>
#include <linux/spinlock.h>
#include "rfkill.h"
#define POLL_INTERVAL (5 * HZ)
#define RFKILL_BLOCK_HW BIT(0)
#define RFKILL_BLOCK_SW BIT(1)
#define RFKILL_BLOCK_SW_PREV BIT(2)
#define RFKILL_BLOCK_ANY (RFKILL_BLOCK_HW |\
RFKILL_BLOCK_SW |\
RFKILL_BLOCK_SW_PREV)
#define RFKILL_BLOCK_SW_SETCALL BIT(31)
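/*
 * _HW and _SW are the current hard and soft block states; _SW_PREV
 * remembers the soft state before the current ops->set_block() call so
 * a failed call can be rolled back, and _SW_SETCALL marks that
 * ops->set_block() is in progress so concurrent rfkill_set_sw_state()
 * calls update _SW_PREV instead of _SW.
 */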
struct rfkill {
spinlock_t lock;
const char *name;
enum rfkill_type type;
unsigned long state;
bool registered;
bool suspended;
const struct rfkill_ops *ops;
void *data;
#ifdef CONFIG_RFKILL_LEDS
struct led_trigger led_trigger;
const char *ledtrigname;
#endif
struct device dev;
struct list_head node;
struct delayed_work poll_work;
struct work_struct uevent_work;
struct work_struct sync_work;
};
#define to_rfkill(d) container_of(d, struct rfkill, dev)
MODULE_AUTHOR("Ivo van Doorn <IvDoorn@gmail.com>");
MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
MODULE_DESCRIPTION("RF switch support");
MODULE_LICENSE("GPL");
/*
* The locking here should be made much smarter, we currently have
* a bit of a stupid situation because drivers might want to register
* the rfkill struct under their own lock, and take this lock during
* rfkill method calls -- which will cause an AB-BA deadlock situation.
*
* To fix that, we need to rework this code here to be mostly lock-free
* and only use the mutex for list manipulations, not to protect the
* various other global variables. Then we can avoid holding the mutex
* around driver operations, and all is happy.
*/
static LIST_HEAD(rfkill_list); /* list of registered rf switches */
static DEFINE_MUTEX(rfkill_global_mutex);
static unsigned int rfkill_default_state = 1;
module_param_named(default_state, rfkill_default_state, uint, 0444);
MODULE_PARM_DESC(default_state,
"Default initial state for all radio types, 0 = radio off");
static struct {
bool cur, def;
} rfkill_global_states[NUM_RFKILL_TYPES];
static unsigned long rfkill_states_default_locked;
static bool rfkill_epo_lock_active;
#ifdef CONFIG_RFKILL_LEDS
static void rfkill_led_trigger_event(struct rfkill *rfkill)
{
struct led_trigger *trigger;
if (!rfkill->registered)
return;
trigger = &rfkill->led_trigger;
if (rfkill->state & RFKILL_BLOCK_ANY)
led_trigger_event(trigger, LED_OFF);
else
led_trigger_event(trigger, LED_FULL);
}
static void rfkill_led_trigger_activate(struct led_classdev *led)
{
struct rfkill *rfkill;
rfkill = container_of(led->trigger, struct rfkill, led_trigger);
rfkill_led_trigger_event(rfkill);
}
const char *rfkill_get_led_trigger_name(struct rfkill *rfkill)
{
return rfkill->led_trigger.name;
}
EXPORT_SYMBOL(rfkill_get_led_trigger_name);
void rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name)
{
BUG_ON(!rfkill);
rfkill->ledtrigname = name;
}
EXPORT_SYMBOL(rfkill_set_led_trigger_name);
static int rfkill_led_trigger_register(struct rfkill *rfkill)
{
rfkill->led_trigger.name = rfkill->ledtrigname
? : dev_name(&rfkill->dev);
rfkill->led_trigger.activate = rfkill_led_trigger_activate;
return led_trigger_register(&rfkill->led_trigger);
}
static void rfkill_led_trigger_unregister(struct rfkill *rfkill)
{
led_trigger_unregister(&rfkill->led_trigger);
}
#else
static void rfkill_led_trigger_event(struct rfkill *rfkill)
{
}
static inline int rfkill_led_trigger_register(struct rfkill *rfkill)
{
return 0;
}
static inline void rfkill_led_trigger_unregister(struct rfkill *rfkill)
{
}
#endif /* CONFIG_RFKILL_LEDS */
static void rfkill_uevent(struct rfkill *rfkill)
{
if (!rfkill->registered || rfkill->suspended)
return;
kobject_uevent(&rfkill->dev.kobj, KOBJ_CHANGE);
}
static bool __rfkill_set_hw_state(struct rfkill *rfkill,
bool blocked, bool *change)
{
unsigned long flags;
bool prev, any;
BUG_ON(!rfkill);
spin_lock_irqsave(&rfkill->lock, flags);
prev = !!(rfkill->state & RFKILL_BLOCK_HW);
if (blocked)
rfkill->state |= RFKILL_BLOCK_HW;
else
rfkill->state &= ~RFKILL_BLOCK_HW;
*change = prev != blocked;
any = rfkill->state & RFKILL_BLOCK_ANY;
spin_unlock_irqrestore(&rfkill->lock, flags);
rfkill_led_trigger_event(rfkill);
return any;
}
/**
* rfkill_set_block - wrapper for set_block method
*
* @rfkill: the rfkill struct to use
* @blocked: the new software state
*
* Calls the set_block method (when applicable) and handles notifications
* etc. as well.
*/
static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
{
unsigned long flags;
int err;
/*
* Some platforms (...!) generate input events which affect the
* _hard_ kill state -- whenever something tries to change the
* current software state, query the hardware state too.
*/
if (rfkill->ops->query)
rfkill->ops->query(rfkill, rfkill->data);
spin_lock_irqsave(&rfkill->lock, flags);
if (rfkill->state & RFKILL_BLOCK_SW)
rfkill->state |= RFKILL_BLOCK_SW_PREV;
else
rfkill->state &= ~RFKILL_BLOCK_SW_PREV;
if (blocked)
rfkill->state |= RFKILL_BLOCK_SW;
else
rfkill->state &= ~RFKILL_BLOCK_SW;
rfkill->state |= RFKILL_BLOCK_SW_SETCALL;
spin_unlock_irqrestore(&rfkill->lock, flags);
if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP))
return;
err = rfkill->ops->set_block(rfkill->data, blocked);
spin_lock_irqsave(&rfkill->lock, flags);
if (err) {
/*
* Failed -- reset status to _prev, this may be different
* from what we set _PREV to earlier in this function
* if rfkill_set_sw_state was invoked.
*/
if (rfkill->state & RFKILL_BLOCK_SW_PREV)
rfkill->state |= RFKILL_BLOCK_SW;
else
rfkill->state &= ~RFKILL_BLOCK_SW;
}
rfkill->state &= ~RFKILL_BLOCK_SW_SETCALL;
rfkill->state &= ~RFKILL_BLOCK_SW_PREV;
spin_unlock_irqrestore(&rfkill->lock, flags);
rfkill_led_trigger_event(rfkill);
rfkill_uevent(rfkill);
}
/**
* __rfkill_switch_all - Toggle state of all switches of given type
* @type: type of interfaces to be affected
* @blocked: the new state
*
* This function sets the state of all switches of given type,
* unless a specific switch is suspended.
*
* Caller must have acquired rfkill_global_mutex.
*/
static void __rfkill_switch_all(const enum rfkill_type type, bool blocked)
{
struct rfkill *rfkill;
rfkill_global_states[type].cur = blocked;
list_for_each_entry(rfkill, &rfkill_list, node) {
if (rfkill->type != type)
continue;
rfkill_set_block(rfkill, blocked);
}
}
/**
* rfkill_switch_all - Toggle state of all switches of given type
* @type: type of interfaces to be affected
* @blocked: the new state
*
* Acquires rfkill_global_mutex and calls __rfkill_switch_all(@type, @blocked).
* Please refer to __rfkill_switch_all() for details.
*
* Does nothing if the EPO lock is active.
*/
void rfkill_switch_all(enum rfkill_type type, bool blocked)
{
mutex_lock(&rfkill_global_mutex);
if (!rfkill_epo_lock_active)
__rfkill_switch_all(type, blocked);
mutex_unlock(&rfkill_global_mutex);
}
/**
* rfkill_epo - emergency power off all transmitters
*
* This kicks all rfkill devices into the soft-blocked state, ignoring
* everything in its path but rfkill_global_mutex.
*
* The global state before the EPO is saved and can be restored later
* using rfkill_restore_states().
*/
void rfkill_epo(void)
{
struct rfkill *rfkill;
int i;
mutex_lock(&rfkill_global_mutex);
rfkill_epo_lock_active = true;
list_for_each_entry(rfkill, &rfkill_list, node)
rfkill_set_block(rfkill, true);
for (i = 0; i < NUM_RFKILL_TYPES; i++) {
rfkill_global_states[i].def = rfkill_global_states[i].cur;
rfkill_global_states[i].cur = true;
}
mutex_unlock(&rfkill_global_mutex);
}
/**
* rfkill_restore_states - restore global states
*
* Restore (and sync switches to) the global state from the default
* states saved by rfkill_epo(). This can undo the effects of
* a call to rfkill_epo().
*/
void rfkill_restore_states(void)
{
int i;
mutex_lock(&rfkill_global_mutex);
rfkill_epo_lock_active = false;
for (i = 0; i < NUM_RFKILL_TYPES; i++)
__rfkill_switch_all(i, rfkill_global_states[i].def);
mutex_unlock(&rfkill_global_mutex);
}
/**
* rfkill_remove_epo_lock - unlock state changes
*
* Used by rfkill-input to manually unlock state changes when
* the EPO switch is deactivated.
*/
void rfkill_remove_epo_lock(void)
{
mutex_lock(&rfkill_global_mutex);
rfkill_epo_lock_active = false;
mutex_unlock(&rfkill_global_mutex);
}
/**
* rfkill_is_epo_lock_active - returns true if EPO is active
*
* Returns 0 (false) if there is NOT an active EPO condition,
* and 1 (true) if there is an active EPO condition, which
* locks all radios in one of the BLOCKED states.
*
* Can be called in atomic context.
*/
bool rfkill_is_epo_lock_active(void)
{
return rfkill_epo_lock_active;
}
/**
* rfkill_get_global_sw_state - returns global state for a type
* @type: the type to get the global state of
*
* Returns the current global state for a given wireless
* device type.
*/
bool rfkill_get_global_sw_state(const enum rfkill_type type)
{
return rfkill_global_states[type].cur;
}
void rfkill_set_global_sw_state(const enum rfkill_type type, bool blocked)
{
mutex_lock(&rfkill_global_mutex);
/* don't allow unblock when epo */
if (rfkill_epo_lock_active && !blocked)
goto out;
/* too late */
if (rfkill_states_default_locked & BIT(type))
goto out;
rfkill_states_default_locked |= BIT(type);
rfkill_global_states[type].cur = blocked;
rfkill_global_states[type].def = blocked;
out:
mutex_unlock(&rfkill_global_mutex);
}
EXPORT_SYMBOL(rfkill_set_global_sw_state);
bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked)
{
bool ret, change;
ret = __rfkill_set_hw_state(rfkill, blocked, &change);
if (!rfkill->registered)
return ret;
if (change)
schedule_work(&rfkill->uevent_work);
return ret;
}
EXPORT_SYMBOL(rfkill_set_hw_state);
static void __rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
{
u32 bit = RFKILL_BLOCK_SW;
/* if in a ops->set_block right now, use other bit */
if (rfkill->state & RFKILL_BLOCK_SW_SETCALL)
bit = RFKILL_BLOCK_SW_PREV;
if (blocked)
rfkill->state |= bit;
else
rfkill->state &= ~bit;
}
bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
{
unsigned long flags;
bool prev, hwblock;
BUG_ON(!rfkill);
spin_lock_irqsave(&rfkill->lock, flags);
prev = !!(rfkill->state & RFKILL_BLOCK_SW);
__rfkill_set_sw_state(rfkill, blocked);
hwblock = !!(rfkill->state & RFKILL_BLOCK_HW);
blocked = blocked || hwblock;
spin_unlock_irqrestore(&rfkill->lock, flags);
if (!rfkill->registered)
return blocked;
if (prev != blocked && !hwblock)
schedule_work(&rfkill->uevent_work);
rfkill_led_trigger_event(rfkill);
return blocked;
}
EXPORT_SYMBOL(rfkill_set_sw_state);
void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw)
{
unsigned long flags;
bool swprev, hwprev;
BUG_ON(!rfkill);
spin_lock_irqsave(&rfkill->lock, flags);
/*
* No need to care about prev/setblock ... this is for uevent only
* and that will get triggered by rfkill_set_block anyway.
*/
swprev = !!(rfkill->state & RFKILL_BLOCK_SW);
hwprev = !!(rfkill->state & RFKILL_BLOCK_HW);
__rfkill_set_sw_state(rfkill, sw);
spin_unlock_irqrestore(&rfkill->lock, flags);
if (!rfkill->registered)
return;
if (swprev != sw || hwprev != hw)
schedule_work(&rfkill->uevent_work);
rfkill_led_trigger_event(rfkill);
}
EXPORT_SYMBOL(rfkill_set_states);
static ssize_t rfkill_name_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct rfkill *rfkill = to_rfkill(dev);
return sprintf(buf, "%s\n", rfkill->name);
}
static const char *rfkill_get_type_str(enum rfkill_type type)
{
switch (type) {
case RFKILL_TYPE_WLAN:
return "wlan";
case RFKILL_TYPE_BLUETOOTH:
return "bluetooth";
case RFKILL_TYPE_UWB:
return "ultrawideband";
case RFKILL_TYPE_WIMAX:
return "wimax";
case RFKILL_TYPE_WWAN:
return "wwan";
default:
BUG();
}
BUILD_BUG_ON(NUM_RFKILL_TYPES != RFKILL_TYPE_WWAN + 1);
}
static ssize_t rfkill_type_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct rfkill *rfkill = to_rfkill(dev);
return sprintf(buf, "%s\n", rfkill_get_type_str(rfkill->type));
}
static u8 user_state_from_blocked(unsigned long state)
{
if (state & RFKILL_BLOCK_HW)
return RFKILL_USER_STATE_HARD_BLOCKED;
if (state & RFKILL_BLOCK_SW)
return RFKILL_USER_STATE_SOFT_BLOCKED;
return RFKILL_USER_STATE_UNBLOCKED;
}
static ssize_t rfkill_state_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct rfkill *rfkill = to_rfkill(dev);
unsigned long flags;
u32 state;
spin_lock_irqsave(&rfkill->lock, flags);
state = rfkill->state;
spin_unlock_irqrestore(&rfkill->lock, flags);
return sprintf(buf, "%d\n", user_state_from_blocked(state));
}
static ssize_t rfkill_state_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
/*
* The intention was that userspace can only take control over
* a given device when/if rfkill-input doesn't control it due
* to user_claim. Since user_claim is currently unsupported,
* we never support changing the state from userspace -- this
* can be implemented again later.
*/
return -EPERM;
}
static ssize_t rfkill_claim_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%d\n", 0);
}
static ssize_t rfkill_claim_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
return -EOPNOTSUPP;
}
static struct device_attribute rfkill_dev_attrs[] = {
__ATTR(name, S_IRUGO, rfkill_name_show, NULL),
__ATTR(type, S_IRUGO, rfkill_type_show, NULL),
__ATTR(state, S_IRUGO|S_IWUSR, rfkill_state_show, rfkill_state_store),
__ATTR(claim, S_IRUGO|S_IWUSR, rfkill_claim_show, rfkill_claim_store),
__ATTR_NULL
};
static void rfkill_release(struct device *dev)
{
struct rfkill *rfkill = to_rfkill(dev);
kfree(rfkill);
}
static int rfkill_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
{
struct rfkill *rfkill = to_rfkill(dev);
unsigned long flags;
u32 state;
int error;
error = add_uevent_var(env, "RFKILL_NAME=%s", rfkill->name);
if (error)
return error;
error = add_uevent_var(env, "RFKILL_TYPE=%s",
rfkill_get_type_str(rfkill->type));
if (error)
return error;
spin_lock_irqsave(&rfkill->lock, flags);
state = rfkill->state;
spin_unlock_irqrestore(&rfkill->lock, flags);
error = add_uevent_var(env, "RFKILL_STATE=%d",
user_state_from_blocked(state));
return error;
}
void rfkill_pause_polling(struct rfkill *rfkill)
{
BUG_ON(!rfkill);
if (!rfkill->ops->poll)
return;
cancel_delayed_work_sync(&rfkill->poll_work);
}
EXPORT_SYMBOL(rfkill_pause_polling);
void rfkill_resume_polling(struct rfkill *rfkill)
{
BUG_ON(!rfkill);
if (!rfkill->ops->poll)
return;
schedule_work(&rfkill->poll_work.work);
}
EXPORT_SYMBOL(rfkill_resume_polling);
static int rfkill_suspend(struct device *dev, pm_message_t state)
{
struct rfkill *rfkill = to_rfkill(dev);
rfkill_pause_polling(rfkill);
rfkill->suspended = true;
return 0;
}
static int rfkill_resume(struct device *dev)
{
struct rfkill *rfkill = to_rfkill(dev);
bool cur;
mutex_lock(&rfkill_global_mutex);
cur = rfkill_global_states[rfkill->type].cur;
rfkill_set_block(rfkill, cur);
mutex_unlock(&rfkill_global_mutex);
rfkill->suspended = false;
schedule_work(&rfkill->uevent_work);
rfkill_resume_polling(rfkill);
return 0;
}
static struct class rfkill_class = {
.name = "rfkill",
.dev_release = rfkill_release,
.dev_attrs = rfkill_dev_attrs,
.dev_uevent = rfkill_dev_uevent,
.suspend = rfkill_suspend,
.resume = rfkill_resume,
};
struct rfkill * __must_check rfkill_alloc(const char *name,
struct device *parent,
const enum rfkill_type type,
const struct rfkill_ops *ops,
void *ops_data)
{
struct rfkill *rfkill;
struct device *dev;
if (WARN_ON(!ops))
return NULL;
if (WARN_ON(!ops->set_block))
return NULL;
if (WARN_ON(!name))
return NULL;
if (WARN_ON(type >= NUM_RFKILL_TYPES))
return NULL;
rfkill = kzalloc(sizeof(*rfkill), GFP_KERNEL);
if (!rfkill)
return NULL;
spin_lock_init(&rfkill->lock);
INIT_LIST_HEAD(&rfkill->node);
rfkill->type = type;
rfkill->name = name;
rfkill->ops = ops;
rfkill->data = ops_data;
dev = &rfkill->dev;
dev->class = &rfkill_class;
dev->parent = parent;
device_initialize(dev);
return rfkill;
}
EXPORT_SYMBOL(rfkill_alloc);
static void rfkill_poll(struct work_struct *work)
{
struct rfkill *rfkill;
rfkill = container_of(work, struct rfkill, poll_work.work);
/*
* Poll hardware state -- driver will use one of the
* rfkill_set{,_hw,_sw}_state functions and use its
* return value to update the current status.
*/
rfkill->ops->poll(rfkill, rfkill->data);
schedule_delayed_work(&rfkill->poll_work,
round_jiffies_relative(POLL_INTERVAL));
}
static void rfkill_uevent_work(struct work_struct *work)
{
struct rfkill *rfkill;
rfkill = container_of(work, struct rfkill, uevent_work);
rfkill_uevent(rfkill);
}
static void rfkill_sync_work(struct work_struct *work)
{
struct rfkill *rfkill;
bool cur;
rfkill = container_of(work, struct rfkill, sync_work);
mutex_lock(&rfkill_global_mutex);
cur = rfkill_global_states[rfkill->type].cur;
rfkill_set_block(rfkill, cur);
mutex_unlock(&rfkill_global_mutex);
}
int __must_check rfkill_register(struct rfkill *rfkill)
{
static unsigned long rfkill_no;
struct device *dev = &rfkill->dev;
int error;
BUG_ON(!rfkill);
mutex_lock(&rfkill_global_mutex);
if (rfkill->registered) {
error = -EALREADY;
goto unlock;
}
dev_set_name(dev, "rfkill%lu", rfkill_no);
rfkill_no++;
if (!(rfkill_states_default_locked & BIT(rfkill->type))) {
/* first of its kind */
BUILD_BUG_ON(NUM_RFKILL_TYPES >
sizeof(rfkill_states_default_locked) * 8);
rfkill_states_default_locked |= BIT(rfkill->type);
rfkill_global_states[rfkill->type].cur =
rfkill_global_states[rfkill->type].def;
}
list_add_tail(&rfkill->node, &rfkill_list);
error = device_add(dev);
if (error)
goto remove;
error = rfkill_led_trigger_register(rfkill);
if (error)
goto devdel;
rfkill->registered = true;
if (rfkill->ops->poll) {
INIT_DELAYED_WORK(&rfkill->poll_work, rfkill_poll);
schedule_delayed_work(&rfkill->poll_work,
round_jiffies_relative(POLL_INTERVAL));
}
INIT_WORK(&rfkill->uevent_work, rfkill_uevent_work);
INIT_WORK(&rfkill->sync_work, rfkill_sync_work);
schedule_work(&rfkill->sync_work);
mutex_unlock(&rfkill_global_mutex);
return 0;
devdel:
device_del(&rfkill->dev);
remove:
list_del_init(&rfkill->node);
unlock:
mutex_unlock(&rfkill_global_mutex);
return error;
}
EXPORT_SYMBOL(rfkill_register);
void rfkill_unregister(struct rfkill *rfkill)
{
BUG_ON(!rfkill);
if (rfkill->ops->poll)
cancel_delayed_work_sync(&rfkill->poll_work);
cancel_work_sync(&rfkill->uevent_work);
cancel_work_sync(&rfkill->sync_work);
rfkill->registered = false;
device_del(&rfkill->dev);
mutex_lock(&rfkill_global_mutex);
list_del_init(&rfkill->node);
mutex_unlock(&rfkill_global_mutex);
rfkill_led_trigger_unregister(rfkill);
}
EXPORT_SYMBOL(rfkill_unregister);
void rfkill_destroy(struct rfkill *rfkill)
{
if (rfkill)
put_device(&rfkill->dev);
}
EXPORT_SYMBOL(rfkill_destroy);
static int __init rfkill_init(void)
{
int error;
int i;
for (i = 0; i < NUM_RFKILL_TYPES; i++)
rfkill_global_states[i].def = !rfkill_default_state;
error = class_register(&rfkill_class);
if (error)
goto out;
#ifdef CONFIG_RFKILL_INPUT
error = rfkill_handler_init();
if (error)
class_unregister(&rfkill_class);
#endif
out:
return error;
}
subsys_initcall(rfkill_init);
static void __exit rfkill_exit(void)
{
#ifdef CONFIG_RFKILL_INPUT
rfkill_handler_exit();
#endif
class_unregister(&rfkill_class);
}
module_exit(rfkill_exit);

net/rfkill/input.c (new file)

@@ -0,0 +1,342 @@
/*
* Input layer to RF Kill interface connector
*
* Copyright (c) 2007 Dmitry Torokhov
* Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
* If you ever run into a situation in which you have a SW_ type rfkill
* input device, then you can revive code that was removed in the patch
* "rfkill-input: remove unused code".
*/
#include <linux/input.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/init.h>
#include <linux/rfkill.h>
#include <linux/sched.h>
#include "rfkill.h"
enum rfkill_input_master_mode {
RFKILL_INPUT_MASTER_UNLOCK = 0,
RFKILL_INPUT_MASTER_RESTORE = 1,
RFKILL_INPUT_MASTER_UNBLOCKALL = 2,
NUM_RFKILL_INPUT_MASTER_MODES
};
/* Delay (in ms) between consecutive switch ops */
#define RFKILL_OPS_DELAY 200
static enum rfkill_input_master_mode rfkill_master_switch_mode =
RFKILL_INPUT_MASTER_UNBLOCKALL;
module_param_named(master_switch_mode, rfkill_master_switch_mode, uint, 0);
MODULE_PARM_DESC(master_switch_mode,
"SW_RFKILL_ALL ON should: 0=do nothing (only unlock); 1=restore; 2=unblock all");
static spinlock_t rfkill_op_lock;
static bool rfkill_op_pending;
static unsigned long rfkill_sw_pending[BITS_TO_LONGS(NUM_RFKILL_TYPES)];
static unsigned long rfkill_sw_state[BITS_TO_LONGS(NUM_RFKILL_TYPES)];
enum rfkill_sched_op {
RFKILL_GLOBAL_OP_EPO = 0,
RFKILL_GLOBAL_OP_RESTORE,
RFKILL_GLOBAL_OP_UNLOCK,
RFKILL_GLOBAL_OP_UNBLOCK,
};
static enum rfkill_sched_op rfkill_master_switch_op;
static enum rfkill_sched_op rfkill_op;
static void __rfkill_handle_global_op(enum rfkill_sched_op op)
{
unsigned int i;
switch (op) {
case RFKILL_GLOBAL_OP_EPO:
rfkill_epo();
break;
case RFKILL_GLOBAL_OP_RESTORE:
rfkill_restore_states();
break;
case RFKILL_GLOBAL_OP_UNLOCK:
rfkill_remove_epo_lock();
break;
case RFKILL_GLOBAL_OP_UNBLOCK:
rfkill_remove_epo_lock();
for (i = 0; i < NUM_RFKILL_TYPES; i++)
rfkill_switch_all(i, false);
break;
default:
/* memory corruption or bug, fail safely */
rfkill_epo();
WARN(1, "Unknown requested operation %d! "
"rfkill Emergency Power Off activated\n",
op);
}
}
static void __rfkill_handle_normal_op(const enum rfkill_type type,
const bool complement)
{
bool blocked;
blocked = rfkill_get_global_sw_state(type);
if (complement)
blocked = !blocked;
rfkill_switch_all(type, blocked);
}
static void rfkill_op_handler(struct work_struct *work)
{
unsigned int i;
bool c;
spin_lock_irq(&rfkill_op_lock);
do {
if (rfkill_op_pending) {
enum rfkill_sched_op op = rfkill_op;
rfkill_op_pending = false;
memset(rfkill_sw_pending, 0,
sizeof(rfkill_sw_pending));
spin_unlock_irq(&rfkill_op_lock);
__rfkill_handle_global_op(op);
spin_lock_irq(&rfkill_op_lock);
/*
* handle global ops first -- during unlocked period
* we might have gotten a new global op.
*/
if (rfkill_op_pending)
continue;
}
if (rfkill_is_epo_lock_active())
continue;
for (i = 0; i < NUM_RFKILL_TYPES; i++) {
if (__test_and_clear_bit(i, rfkill_sw_pending)) {
c = __test_and_clear_bit(i, rfkill_sw_state);
spin_unlock_irq(&rfkill_op_lock);
__rfkill_handle_normal_op(i, c);
spin_lock_irq(&rfkill_op_lock);
}
}
} while (rfkill_op_pending);
spin_unlock_irq(&rfkill_op_lock);
}
static DECLARE_DELAYED_WORK(rfkill_op_work, rfkill_op_handler);
static unsigned long rfkill_last_scheduled;
static unsigned long rfkill_ratelimit(const unsigned long last)
{
const unsigned long delay = msecs_to_jiffies(RFKILL_OPS_DELAY);
return (time_after(jiffies, last + delay)) ? 0 : delay;
}
static void rfkill_schedule_ratelimited(void)
{
if (delayed_work_pending(&rfkill_op_work))
return;
schedule_delayed_work(&rfkill_op_work,
rfkill_ratelimit(rfkill_last_scheduled));
rfkill_last_scheduled = jiffies;
}
static void rfkill_schedule_global_op(enum rfkill_sched_op op)
{
unsigned long flags;
spin_lock_irqsave(&rfkill_op_lock, flags);
rfkill_op = op;
rfkill_op_pending = true;
if (op == RFKILL_GLOBAL_OP_EPO && !rfkill_is_epo_lock_active()) {
/* bypass the limiter for EPO */
cancel_delayed_work(&rfkill_op_work);
schedule_delayed_work(&rfkill_op_work, 0);
rfkill_last_scheduled = jiffies;
} else
rfkill_schedule_ratelimited();
spin_unlock_irqrestore(&rfkill_op_lock, flags);
}
static void rfkill_schedule_toggle(enum rfkill_type type)
{
unsigned long flags;
if (rfkill_is_epo_lock_active())
return;
spin_lock_irqsave(&rfkill_op_lock, flags);
if (!rfkill_op_pending) {
__set_bit(type, rfkill_sw_pending);
__change_bit(type, rfkill_sw_state);
rfkill_schedule_ratelimited();
}
spin_unlock_irqrestore(&rfkill_op_lock, flags);
}
static void rfkill_schedule_evsw_rfkillall(int state)
{
if (state)
rfkill_schedule_global_op(rfkill_master_switch_op);
else
rfkill_schedule_global_op(RFKILL_GLOBAL_OP_EPO);
}
static void rfkill_event(struct input_handle *handle, unsigned int type,
unsigned int code, int data)
{
if (type == EV_KEY && data == 1) {
switch (code) {
case KEY_WLAN:
rfkill_schedule_toggle(RFKILL_TYPE_WLAN);
break;
case KEY_BLUETOOTH:
rfkill_schedule_toggle(RFKILL_TYPE_BLUETOOTH);
break;
case KEY_UWB:
rfkill_schedule_toggle(RFKILL_TYPE_UWB);
break;
case KEY_WIMAX:
rfkill_schedule_toggle(RFKILL_TYPE_WIMAX);
break;
}
} else if (type == EV_SW && code == SW_RFKILL_ALL)
rfkill_schedule_evsw_rfkillall(data);
}
static int rfkill_connect(struct input_handler *handler, struct input_dev *dev,
const struct input_device_id *id)
{
struct input_handle *handle;
int error;
handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
if (!handle)
return -ENOMEM;
handle->dev = dev;
handle->handler = handler;
handle->name = "rfkill";
/* causes rfkill_start() to be called */
error = input_register_handle(handle);
if (error)
goto err_free_handle;
error = input_open_device(handle);
if (error)
goto err_unregister_handle;
return 0;
err_unregister_handle:
input_unregister_handle(handle);
err_free_handle:
kfree(handle);
return error;
}
static void rfkill_start(struct input_handle *handle)
{
/*
* Take event_lock to guard against configuration changes; we
* should be able to deal with concurrency with rfkill_event()
* just fine (which event_lock will also avoid).
*/
spin_lock_irq(&handle->dev->event_lock);
if (test_bit(EV_SW, handle->dev->evbit) &&
test_bit(SW_RFKILL_ALL, handle->dev->swbit))
rfkill_schedule_evsw_rfkillall(test_bit(SW_RFKILL_ALL,
handle->dev->sw));
spin_unlock_irq(&handle->dev->event_lock);
}
static void rfkill_disconnect(struct input_handle *handle)
{
input_close_device(handle);
input_unregister_handle(handle);
kfree(handle);
}
static const struct input_device_id rfkill_ids[] = {
{
.flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
.evbit = { BIT_MASK(EV_KEY) },
.keybit = { [BIT_WORD(KEY_WLAN)] = BIT_MASK(KEY_WLAN) },
},
{
.flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
.evbit = { BIT_MASK(EV_KEY) },
.keybit = { [BIT_WORD(KEY_BLUETOOTH)] = BIT_MASK(KEY_BLUETOOTH) },
},
{
.flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
.evbit = { BIT_MASK(EV_KEY) },
.keybit = { [BIT_WORD(KEY_UWB)] = BIT_MASK(KEY_UWB) },
},
{
.flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
.evbit = { BIT_MASK(EV_KEY) },
.keybit = { [BIT_WORD(KEY_WIMAX)] = BIT_MASK(KEY_WIMAX) },
},
{
.flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_SWBIT,
.evbit = { BIT(EV_SW) },
.swbit = { [BIT_WORD(SW_RFKILL_ALL)] = BIT_MASK(SW_RFKILL_ALL) },
},
{ }
};
static struct input_handler rfkill_handler = {
.name = "rfkill",
.event = rfkill_event,
.connect = rfkill_connect,
.start = rfkill_start,
.disconnect = rfkill_disconnect,
.id_table = rfkill_ids,
};
int __init rfkill_handler_init(void)
{
switch (rfkill_master_switch_mode) {
case RFKILL_INPUT_MASTER_UNBLOCKALL:
rfkill_master_switch_op = RFKILL_GLOBAL_OP_UNBLOCK;
break;
case RFKILL_INPUT_MASTER_RESTORE:
rfkill_master_switch_op = RFKILL_GLOBAL_OP_RESTORE;
break;
case RFKILL_INPUT_MASTER_UNLOCK:
rfkill_master_switch_op = RFKILL_GLOBAL_OP_UNLOCK;
break;
default:
return -EINVAL;
}
spin_lock_init(&rfkill_op_lock);
/* Avoid delay at first schedule */
rfkill_last_scheduled =
jiffies - msecs_to_jiffies(RFKILL_OPS_DELAY) - 1;
return input_register_handler(&rfkill_handler);
}
void __exit rfkill_handler_exit(void)
{
input_unregister_handler(&rfkill_handler);
cancel_delayed_work_sync(&rfkill_op_work);
}

net/rfkill/rfkill-input.c (deleted)

@@ -1,390 +0,0 @@
/*
* Input layer to RF Kill interface connector
*
* Copyright (c) 2007 Dmitry Torokhov
*/
/*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/input.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/init.h>
#include <linux/rfkill.h>
#include <linux/sched.h>
#include "rfkill-input.h"
MODULE_AUTHOR("Dmitry Torokhov <dtor@mail.ru>");
MODULE_DESCRIPTION("Input layer to RF switch connector");
MODULE_LICENSE("GPL");
enum rfkill_input_master_mode {
RFKILL_INPUT_MASTER_DONOTHING = 0,
RFKILL_INPUT_MASTER_RESTORE = 1,
RFKILL_INPUT_MASTER_UNBLOCKALL = 2,
RFKILL_INPUT_MASTER_MAX, /* marker */
};
/* Delay (in ms) between consecutive switch ops */
#define RFKILL_OPS_DELAY 200
static enum rfkill_input_master_mode rfkill_master_switch_mode =
RFKILL_INPUT_MASTER_UNBLOCKALL;
module_param_named(master_switch_mode, rfkill_master_switch_mode, uint, 0);
MODULE_PARM_DESC(master_switch_mode,
"SW_RFKILL_ALL ON should: 0=do nothing; 1=restore; 2=unblock all");
enum rfkill_global_sched_op {
RFKILL_GLOBAL_OP_EPO = 0,
RFKILL_GLOBAL_OP_RESTORE,
RFKILL_GLOBAL_OP_UNLOCK,
RFKILL_GLOBAL_OP_UNBLOCK,
};
struct rfkill_task {
struct delayed_work dwork;
/* ensures that task is serialized */
struct mutex mutex;
/* protects everything below */
spinlock_t lock;
/* pending regular switch operations (1=pending) */
unsigned long sw_pending[BITS_TO_LONGS(RFKILL_TYPE_MAX)];
/* should the state be complemented (1=yes) */
unsigned long sw_togglestate[BITS_TO_LONGS(RFKILL_TYPE_MAX)];
bool global_op_pending;
enum rfkill_global_sched_op op;
/* last time it was scheduled */
unsigned long last_scheduled;
};
static void __rfkill_handle_global_op(enum rfkill_global_sched_op op)
{
unsigned int i;
switch (op) {
case RFKILL_GLOBAL_OP_EPO:
rfkill_epo();
break;
case RFKILL_GLOBAL_OP_RESTORE:
rfkill_restore_states();
break;
case RFKILL_GLOBAL_OP_UNLOCK:
rfkill_remove_epo_lock();
break;
case RFKILL_GLOBAL_OP_UNBLOCK:
rfkill_remove_epo_lock();
for (i = 0; i < RFKILL_TYPE_MAX; i++)
rfkill_switch_all(i, RFKILL_STATE_UNBLOCKED);
break;
default:
/* memory corruption or bug, fail safely */
rfkill_epo();
WARN(1, "Unknown requested operation %d! "
"rfkill Emergency Power Off activated\n",
op);
}
}
static void __rfkill_handle_normal_op(const enum rfkill_type type,
const bool c)
{
enum rfkill_state state;
state = rfkill_get_global_state(type);
if (c)
state = rfkill_state_complement(state);
rfkill_switch_all(type, state);
}
static void rfkill_task_handler(struct work_struct *work)
{
struct rfkill_task *task = container_of(work,
struct rfkill_task, dwork.work);
bool doit = true;
mutex_lock(&task->mutex);
spin_lock_irq(&task->lock);
while (doit) {
if (task->global_op_pending) {
enum rfkill_global_sched_op op = task->op;
task->global_op_pending = false;
memset(task->sw_pending, 0, sizeof(task->sw_pending));
spin_unlock_irq(&task->lock);
__rfkill_handle_global_op(op);
/* make sure we do at least one pass with
* !task->global_op_pending */
spin_lock_irq(&task->lock);
continue;
} else if (!rfkill_is_epo_lock_active()) {
unsigned int i = 0;
while (!task->global_op_pending &&
i < RFKILL_TYPE_MAX) {
if (test_and_clear_bit(i, task->sw_pending)) {
bool c;
c = test_and_clear_bit(i,
task->sw_togglestate);
spin_unlock_irq(&task->lock);
__rfkill_handle_normal_op(i, c);
spin_lock_irq(&task->lock);
}
i++;
}
}
doit = task->global_op_pending;
}
spin_unlock_irq(&task->lock);
mutex_unlock(&task->mutex);
}
static struct rfkill_task rfkill_task = {
.dwork = __DELAYED_WORK_INITIALIZER(rfkill_task.dwork,
rfkill_task_handler),
.mutex = __MUTEX_INITIALIZER(rfkill_task.mutex),
.lock = __SPIN_LOCK_UNLOCKED(rfkill_task.lock),
};
static unsigned long rfkill_ratelimit(const unsigned long last)
{
const unsigned long delay = msecs_to_jiffies(RFKILL_OPS_DELAY);
return (time_after(jiffies, last + delay)) ? 0 : delay;
}
static void rfkill_schedule_ratelimited(void)
{
if (!delayed_work_pending(&rfkill_task.dwork)) {
schedule_delayed_work(&rfkill_task.dwork,
rfkill_ratelimit(rfkill_task.last_scheduled));
rfkill_task.last_scheduled = jiffies;
}
}
static void rfkill_schedule_global_op(enum rfkill_global_sched_op op)
{
unsigned long flags;
spin_lock_irqsave(&rfkill_task.lock, flags);
rfkill_task.op = op;
rfkill_task.global_op_pending = true;
if (op == RFKILL_GLOBAL_OP_EPO && !rfkill_is_epo_lock_active()) {
/* bypass the limiter for EPO */
cancel_delayed_work(&rfkill_task.dwork);
schedule_delayed_work(&rfkill_task.dwork, 0);
rfkill_task.last_scheduled = jiffies;
} else
rfkill_schedule_ratelimited();
spin_unlock_irqrestore(&rfkill_task.lock, flags);
}
static void rfkill_schedule_toggle(enum rfkill_type type)
{
unsigned long flags;
if (rfkill_is_epo_lock_active())
return;
spin_lock_irqsave(&rfkill_task.lock, flags);
if (!rfkill_task.global_op_pending) {
set_bit(type, rfkill_task.sw_pending);
change_bit(type, rfkill_task.sw_togglestate);
rfkill_schedule_ratelimited();
}
spin_unlock_irqrestore(&rfkill_task.lock, flags);
}
static void rfkill_schedule_evsw_rfkillall(int state)
{
if (state) {
switch (rfkill_master_switch_mode) {
case RFKILL_INPUT_MASTER_UNBLOCKALL:
rfkill_schedule_global_op(RFKILL_GLOBAL_OP_UNBLOCK);
break;
case RFKILL_INPUT_MASTER_RESTORE:
rfkill_schedule_global_op(RFKILL_GLOBAL_OP_RESTORE);
break;
case RFKILL_INPUT_MASTER_DONOTHING:
rfkill_schedule_global_op(RFKILL_GLOBAL_OP_UNLOCK);
break;
default:
/* memory corruption or driver bug! fail safely */
rfkill_schedule_global_op(RFKILL_GLOBAL_OP_EPO);
WARN(1, "Unknown rfkill_master_switch_mode (%d), "
"driver bug or memory corruption detected!\n",
rfkill_master_switch_mode);
break;
}
} else
rfkill_schedule_global_op(RFKILL_GLOBAL_OP_EPO);
}
static void rfkill_event(struct input_handle *handle, unsigned int type,
unsigned int code, int data)
{
if (type == EV_KEY && data == 1) {
enum rfkill_type t;
switch (code) {
case KEY_WLAN:
t = RFKILL_TYPE_WLAN;
break;
case KEY_BLUETOOTH:
t = RFKILL_TYPE_BLUETOOTH;
break;
case KEY_UWB:
t = RFKILL_TYPE_UWB;
break;
case KEY_WIMAX:
t = RFKILL_TYPE_WIMAX;
break;
default:
return;
}
rfkill_schedule_toggle(t);
return;
} else if (type == EV_SW) {
switch (code) {
case SW_RFKILL_ALL:
rfkill_schedule_evsw_rfkillall(data);
return;
default:
return;
}
}
}
static int rfkill_connect(struct input_handler *handler, struct input_dev *dev,
const struct input_device_id *id)
{
struct input_handle *handle;
int error;
handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
if (!handle)
return -ENOMEM;
handle->dev = dev;
handle->handler = handler;
handle->name = "rfkill";
/* causes rfkill_start() to be called */
error = input_register_handle(handle);
if (error)
goto err_free_handle;
error = input_open_device(handle);
if (error)
goto err_unregister_handle;
return 0;
err_unregister_handle:
input_unregister_handle(handle);
err_free_handle:
kfree(handle);
return error;
}
static void rfkill_start(struct input_handle *handle)
{
/* Take event_lock to guard against configuration changes, we
* should be able to deal with concurrency with rfkill_event()
* just fine (which event_lock will also avoid). */
spin_lock_irq(&handle->dev->event_lock);
if (test_bit(EV_SW, handle->dev->evbit)) {
if (test_bit(SW_RFKILL_ALL, handle->dev->swbit))
rfkill_schedule_evsw_rfkillall(test_bit(SW_RFKILL_ALL,
handle->dev->sw));
/* add resync for further EV_SW events here */
}
spin_unlock_irq(&handle->dev->event_lock);
}
static void rfkill_disconnect(struct input_handle *handle)
{
input_close_device(handle);
input_unregister_handle(handle);
kfree(handle);
}
static const struct input_device_id rfkill_ids[] = {
{
.flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
.evbit = { BIT_MASK(EV_KEY) },
.keybit = { [BIT_WORD(KEY_WLAN)] = BIT_MASK(KEY_WLAN) },
},
{
.flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
.evbit = { BIT_MASK(EV_KEY) },
.keybit = { [BIT_WORD(KEY_BLUETOOTH)] = BIT_MASK(KEY_BLUETOOTH) },
},
{
.flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
.evbit = { BIT_MASK(EV_KEY) },
.keybit = { [BIT_WORD(KEY_UWB)] = BIT_MASK(KEY_UWB) },
},
{
.flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
.evbit = { BIT_MASK(EV_KEY) },
.keybit = { [BIT_WORD(KEY_WIMAX)] = BIT_MASK(KEY_WIMAX) },
},
{
.flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_SWBIT,
.evbit = { BIT(EV_SW) },
.swbit = { [BIT_WORD(SW_RFKILL_ALL)] = BIT_MASK(SW_RFKILL_ALL) },
},
{ }
};
static struct input_handler rfkill_handler = {
.event = rfkill_event,
.connect = rfkill_connect,
.disconnect = rfkill_disconnect,
.start = rfkill_start,
.name = "rfkill",
.id_table = rfkill_ids,
};
static int __init rfkill_handler_init(void)
{
if (rfkill_master_switch_mode >= RFKILL_INPUT_MASTER_MAX)
return -EINVAL;
/*
* The penalty to not doing this is a possible RFKILL_OPS_DELAY delay
* at the first use. Acceptable, but if we can avoid it, why not?
*/
rfkill_task.last_scheduled =
jiffies - msecs_to_jiffies(RFKILL_OPS_DELAY) - 1;
return input_register_handler(&rfkill_handler);
}
static void __exit rfkill_handler_exit(void)
{
input_unregister_handler(&rfkill_handler);
cancel_delayed_work_sync(&rfkill_task.dwork);
rfkill_remove_epo_lock();
}
module_init(rfkill_handler_init);
module_exit(rfkill_handler_exit);

net/rfkill/rfkill.c (deleted)

@@ -1,855 +0,0 @@
/*
* Copyright (C) 2006 - 2007 Ivo van Doorn
* Copyright (C) 2007 Dmitry Torokhov
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the
* Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/capability.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rfkill.h>
/* Get declaration of rfkill_switch_all() to shut up sparse. */
#include "rfkill-input.h"
MODULE_AUTHOR("Ivo van Doorn <IvDoorn@gmail.com>");
MODULE_VERSION("1.0");
MODULE_DESCRIPTION("RF switch support");
MODULE_LICENSE("GPL");
static LIST_HEAD(rfkill_list); /* list of registered rf switches */
static DEFINE_MUTEX(rfkill_global_mutex);
static unsigned int rfkill_default_state = RFKILL_STATE_UNBLOCKED;
module_param_named(default_state, rfkill_default_state, uint, 0444);
MODULE_PARM_DESC(default_state,
"Default initial state for all radio types, 0 = radio off");
struct rfkill_gsw_state {
enum rfkill_state current_state;
enum rfkill_state default_state;
};
static struct rfkill_gsw_state rfkill_global_states[RFKILL_TYPE_MAX];
static unsigned long rfkill_states_lockdflt[BITS_TO_LONGS(RFKILL_TYPE_MAX)];
static bool rfkill_epo_lock_active;
#ifdef CONFIG_RFKILL_LEDS
static void rfkill_led_trigger(struct rfkill *rfkill,
enum rfkill_state state)
{
struct led_trigger *led = &rfkill->led_trigger;
if (!led->name)
return;
if (state != RFKILL_STATE_UNBLOCKED)
led_trigger_event(led, LED_OFF);
else
led_trigger_event(led, LED_FULL);
}
static void rfkill_led_trigger_activate(struct led_classdev *led)
{
struct rfkill *rfkill = container_of(led->trigger,
struct rfkill, led_trigger);
rfkill_led_trigger(rfkill, rfkill->state);
}
#else
static inline void rfkill_led_trigger(struct rfkill *rfkill,
enum rfkill_state state)
{
}
#endif /* CONFIG_RFKILL_LEDS */
static void rfkill_uevent(struct rfkill *rfkill)
{
kobject_uevent(&rfkill->dev.kobj, KOBJ_CHANGE);
}
static void update_rfkill_state(struct rfkill *rfkill)
{
enum rfkill_state newstate, oldstate;
if (rfkill->get_state) {
mutex_lock(&rfkill->mutex);
if (!rfkill->get_state(rfkill->data, &newstate)) {
oldstate = rfkill->state;
rfkill->state = newstate;
if (oldstate != newstate)
rfkill_uevent(rfkill);
}
mutex_unlock(&rfkill->mutex);
}
rfkill_led_trigger(rfkill, rfkill->state);
}
/**
* rfkill_toggle_radio - wrapper for toggle_radio hook
* @rfkill: the rfkill struct to use
* @force: calls toggle_radio even if cache says it is not needed,
* and also makes sure notifications of the state will be
* sent even if it didn't change
* @state: the new state to call toggle_radio() with
*
* Calls rfkill->toggle_radio, enforcing the API for toggle_radio
* calls and handling all the red tape such as issuing notifications
* if the call is successful.
*
* Suspended devices are not touched at all, and -EAGAIN is returned.
*
* Note that the @force parameter cannot override a (possibly cached)
* state of RFKILL_STATE_HARD_BLOCKED. Any device making use of
* RFKILL_STATE_HARD_BLOCKED implements either get_state() or
* rfkill_force_state(), so the cache either is bypassed or valid.
*
* Note that we do call toggle_radio for RFKILL_STATE_SOFT_BLOCKED
* even if the radio is in RFKILL_STATE_HARD_BLOCKED state, so as to
* give the driver a hint that it should double-BLOCK the transmitter.
*
* Caller must have acquired rfkill->mutex.
*/
static int rfkill_toggle_radio(struct rfkill *rfkill,
enum rfkill_state state,
int force)
{
int retval = 0;
enum rfkill_state oldstate, newstate;
if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP))
return -EBUSY;
oldstate = rfkill->state;
if (rfkill->get_state && !force &&
!rfkill->get_state(rfkill->data, &newstate)) {
rfkill->state = newstate;
}
switch (state) {
case RFKILL_STATE_HARD_BLOCKED:
/* typically happens when refreshing hardware state,
* such as on resume */
state = RFKILL_STATE_SOFT_BLOCKED;
break;
case RFKILL_STATE_UNBLOCKED:
/* force can't override this, only rfkill_force_state() can */
if (rfkill->state == RFKILL_STATE_HARD_BLOCKED)
return -EPERM;
break;
case RFKILL_STATE_SOFT_BLOCKED:
/* nothing to do, we want to give drivers the hint to double
* BLOCK even a transmitter that is already in state
* RFKILL_STATE_HARD_BLOCKED */
break;
default:
WARN(1, KERN_WARNING
"rfkill: illegal state %d passed as parameter "
"to rfkill_toggle_radio\n", state);
return -EINVAL;
}
if (force || state != rfkill->state) {
retval = rfkill->toggle_radio(rfkill->data, state);
/* never allow a HARD->SOFT downgrade! */
if (!retval && rfkill->state != RFKILL_STATE_HARD_BLOCKED)
rfkill->state = state;
}
if (force || rfkill->state != oldstate)
rfkill_uevent(rfkill);
rfkill_led_trigger(rfkill, rfkill->state);
return retval;
}
/**
* __rfkill_switch_all - Toggle state of all switches of given type
* @type: type of interfaces to be affected
* @state: the new state
*
* This function toggles the state of all switches of given type,
* unless a specific switch is claimed by userspace (in which case,
* that switch is left alone) or suspended.
*
* Caller must have acquired rfkill_global_mutex.
*/
static void __rfkill_switch_all(const enum rfkill_type type,
const enum rfkill_state state)
{
struct rfkill *rfkill;
if (WARN((state >= RFKILL_STATE_MAX || type >= RFKILL_TYPE_MAX),
KERN_WARNING
"rfkill: illegal state %d or type %d "
"passed as parameter to __rfkill_switch_all\n",
state, type))
return;
rfkill_global_states[type].current_state = state;
list_for_each_entry(rfkill, &rfkill_list, node) {
if (rfkill->type == type) {
mutex_lock(&rfkill->mutex);
rfkill_toggle_radio(rfkill, state, 0);
mutex_unlock(&rfkill->mutex);
rfkill_led_trigger(rfkill, rfkill->state);
}
}
}
/**
* rfkill_switch_all - Toggle state of all switches of given type
* @type: type of interfaces to be affected
* @state: the new state
*
* Acquires rfkill_global_mutex and calls __rfkill_switch_all(@type, @state).
* Please refer to __rfkill_switch_all() for details.
*
* Does nothing if the EPO lock is active.
*/
void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state)
{
mutex_lock(&rfkill_global_mutex);
if (!rfkill_epo_lock_active)
__rfkill_switch_all(type, state);
mutex_unlock(&rfkill_global_mutex);
}
EXPORT_SYMBOL(rfkill_switch_all);
/**
* rfkill_epo - emergency power off all transmitters
*
* This kicks all non-suspended rfkill devices to RFKILL_STATE_SOFT_BLOCKED,
* ignoring everything in its path but rfkill_global_mutex and rfkill->mutex.
*
* The global state before the EPO is saved and can be restored later
* using rfkill_restore_states().
*/
void rfkill_epo(void)
{
struct rfkill *rfkill;
int i;
mutex_lock(&rfkill_global_mutex);
rfkill_epo_lock_active = true;
list_for_each_entry(rfkill, &rfkill_list, node) {
mutex_lock(&rfkill->mutex);
rfkill_toggle_radio(rfkill, RFKILL_STATE_SOFT_BLOCKED, 1);
mutex_unlock(&rfkill->mutex);
}
for (i = 0; i < RFKILL_TYPE_MAX; i++) {
rfkill_global_states[i].default_state =
rfkill_global_states[i].current_state;
rfkill_global_states[i].current_state =
RFKILL_STATE_SOFT_BLOCKED;
}
mutex_unlock(&rfkill_global_mutex);
rfkill_led_trigger(rfkill, rfkill->state);
}
EXPORT_SYMBOL_GPL(rfkill_epo);
/**
* rfkill_restore_states - restore global states
*
* Restore (and sync switches to) the global state from the
* states in rfkill_default_states. This can undo the effects of
* a call to rfkill_epo().
*/
void rfkill_restore_states(void)
{
int i;
mutex_lock(&rfkill_global_mutex);
rfkill_epo_lock_active = false;
for (i = 0; i < RFKILL_TYPE_MAX; i++)
__rfkill_switch_all(i, rfkill_global_states[i].default_state);
mutex_unlock(&rfkill_global_mutex);
}
EXPORT_SYMBOL_GPL(rfkill_restore_states);
/**
* rfkill_remove_epo_lock - unlock state changes
*
* Used by rfkill-input manually unlock state changes, when
* the EPO switch is deactivated.
*/
void rfkill_remove_epo_lock(void)
{
mutex_lock(&rfkill_global_mutex);
rfkill_epo_lock_active = false;
mutex_unlock(&rfkill_global_mutex);
}
EXPORT_SYMBOL_GPL(rfkill_remove_epo_lock);
/**
* rfkill_is_epo_lock_active - returns true EPO is active
*
* Returns 0 (false) if there is NOT an active EPO contidion,
* and 1 (true) if there is an active EPO contition, which
* locks all radios in one of the BLOCKED states.
*
* Can be called in atomic context.
*/
bool rfkill_is_epo_lock_active(void)
{
return rfkill_epo_lock_active;
}
EXPORT_SYMBOL_GPL(rfkill_is_epo_lock_active);
/**
* rfkill_get_global_state - returns global state for a type
* @type: the type to get the global state of
*
* Returns the current global state for a given wireless
* device type.
*/
enum rfkill_state rfkill_get_global_state(const enum rfkill_type type)
{
return rfkill_global_states[type].current_state;
}
EXPORT_SYMBOL_GPL(rfkill_get_global_state);
/**
* rfkill_force_state - Force the internal rfkill radio state
* @rfkill: pointer to the rfkill class to modify.
* @state: the current radio state the class should be forced to.
*
* This function updates the internal state of the radio cached
* by the rfkill class. It should be used when the driver gets
* a notification by the firmware/hardware of the current *real*
* state of the radio rfkill switch.
*
* Devices which are subject to external changes on their rfkill
* state (such as those caused by a hardware rfkill line) MUST
* have their driver arrange to call rfkill_force_state() as soon
* as possible after such a change.
*
* This function may not be called from an atomic context.
*/
int rfkill_force_state(struct rfkill *rfkill, enum rfkill_state state)
{
enum rfkill_state oldstate;
BUG_ON(!rfkill);
if (WARN((state >= RFKILL_STATE_MAX),
KERN_WARNING
"rfkill: illegal state %d passed as parameter "
"to rfkill_force_state\n", state))
return -EINVAL;
mutex_lock(&rfkill->mutex);
oldstate = rfkill->state;
rfkill->state = state;
if (state != oldstate)
rfkill_uevent(rfkill);
mutex_unlock(&rfkill->mutex);
rfkill_led_trigger(rfkill, rfkill->state);
return 0;
}
EXPORT_SYMBOL(rfkill_force_state);
static ssize_t rfkill_name_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct rfkill *rfkill = to_rfkill(dev);
return sprintf(buf, "%s\n", rfkill->name);
}
static const char *rfkill_get_type_str(enum rfkill_type type)
{
switch (type) {
case RFKILL_TYPE_WLAN:
return "wlan";
case RFKILL_TYPE_BLUETOOTH:
return "bluetooth";
case RFKILL_TYPE_UWB:
return "ultrawideband";
case RFKILL_TYPE_WIMAX:
return "wimax";
case RFKILL_TYPE_WWAN:
return "wwan";
default:
BUG();
}
}
static ssize_t rfkill_type_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct rfkill *rfkill = to_rfkill(dev);
return sprintf(buf, "%s\n", rfkill_get_type_str(rfkill->type));
}
static ssize_t rfkill_state_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct rfkill *rfkill = to_rfkill(dev);
update_rfkill_state(rfkill);
return sprintf(buf, "%d\n", rfkill->state);
}
static ssize_t rfkill_state_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct rfkill *rfkill = to_rfkill(dev);
unsigned long state;
int error;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
error = strict_strtoul(buf, 0, &state);
if (error)
return error;
/* RFKILL_STATE_HARD_BLOCKED is illegal here... */
if (state != RFKILL_STATE_UNBLOCKED &&
state != RFKILL_STATE_SOFT_BLOCKED)
return -EINVAL;
error = mutex_lock_killable(&rfkill->mutex);
if (error)
return error;
if (!rfkill_epo_lock_active)
error = rfkill_toggle_radio(rfkill, state, 0);
else
error = -EPERM;
mutex_unlock(&rfkill->mutex);
return error ? error : count;
}
static ssize_t rfkill_claim_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%d\n", 0);
}
static ssize_t rfkill_claim_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
return -EOPNOTSUPP;
}
static struct device_attribute rfkill_dev_attrs[] = {
__ATTR(name, S_IRUGO, rfkill_name_show, NULL),
__ATTR(type, S_IRUGO, rfkill_type_show, NULL),
__ATTR(state, S_IRUGO|S_IWUSR, rfkill_state_show, rfkill_state_store),
__ATTR(claim, S_IRUGO|S_IWUSR, rfkill_claim_show, rfkill_claim_store),
__ATTR_NULL
};
static void rfkill_release(struct device *dev)
{
struct rfkill *rfkill = to_rfkill(dev);
kfree(rfkill);
module_put(THIS_MODULE);
}
#ifdef CONFIG_PM
static int rfkill_suspend(struct device *dev, pm_message_t state)
{
struct rfkill *rfkill = to_rfkill(dev);
/* mark class device as suspended */
if (dev->power.power_state.event != state.event)
dev->power.power_state = state;
/* store state for the resume handler */
rfkill->state_for_resume = rfkill->state;
return 0;
}
static int rfkill_resume(struct device *dev)
{
struct rfkill *rfkill = to_rfkill(dev);
enum rfkill_state newstate;
if (dev->power.power_state.event != PM_EVENT_ON) {
mutex_lock(&rfkill->mutex);
dev->power.power_state.event = PM_EVENT_ON;
/*
* rfkill->state could have been modified before we got
* called, and won't be updated by rfkill_toggle_radio()
* in force mode. Sync it FIRST.
*/
if (rfkill->get_state &&
!rfkill->get_state(rfkill->data, &newstate))
rfkill->state = newstate;
/*
* If we are under EPO, kick transmitter offline,
* otherwise restore to pre-suspend state.
*
* Issue a notification in any case
*/
rfkill_toggle_radio(rfkill,
rfkill_epo_lock_active ?
RFKILL_STATE_SOFT_BLOCKED :
rfkill->state_for_resume,
1);
mutex_unlock(&rfkill->mutex);
rfkill_led_trigger(rfkill, rfkill->state);
}
return 0;
}
#else
#define rfkill_suspend NULL
#define rfkill_resume NULL
#endif
static int rfkill_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
{
struct rfkill *rfkill = to_rfkill(dev);
int error;
error = add_uevent_var(env, "RFKILL_NAME=%s", rfkill->name);
if (error)
return error;
error = add_uevent_var(env, "RFKILL_TYPE=%s",
rfkill_get_type_str(rfkill->type));
if (error)
return error;
error = add_uevent_var(env, "RFKILL_STATE=%d", rfkill->state);
return error;
}
static struct class rfkill_class = {
.name = "rfkill",
.dev_release = rfkill_release,
.dev_attrs = rfkill_dev_attrs,
.suspend = rfkill_suspend,
.resume = rfkill_resume,
.dev_uevent = rfkill_dev_uevent,
};
static int rfkill_check_duplicity(const struct rfkill *rfkill)
{
struct rfkill *p;
unsigned long seen[BITS_TO_LONGS(RFKILL_TYPE_MAX)];
memset(seen, 0, sizeof(seen));
list_for_each_entry(p, &rfkill_list, node) {
if (WARN((p == rfkill), KERN_WARNING
"rfkill: illegal attempt to register "
"an already registered rfkill struct\n"))
return -EEXIST;
set_bit(p->type, seen);
}
/* 0: first switch of its kind */
return (test_bit(rfkill->type, seen)) ? 1 : 0;
}
static int rfkill_add_switch(struct rfkill *rfkill)
{
int error;
mutex_lock(&rfkill_global_mutex);
error = rfkill_check_duplicity(rfkill);
if (error < 0)
goto unlock_out;
if (!error) {
/* lock default after first use */
set_bit(rfkill->type, rfkill_states_lockdflt);
rfkill_global_states[rfkill->type].current_state =
rfkill_global_states[rfkill->type].default_state;
}
rfkill_toggle_radio(rfkill,
rfkill_global_states[rfkill->type].current_state,
0);
list_add_tail(&rfkill->node, &rfkill_list);
error = 0;
unlock_out:
mutex_unlock(&rfkill_global_mutex);
return error;
}
static void rfkill_remove_switch(struct rfkill *rfkill)
{
mutex_lock(&rfkill_global_mutex);
list_del_init(&rfkill->node);
mutex_unlock(&rfkill_global_mutex);
mutex_lock(&rfkill->mutex);
rfkill_toggle_radio(rfkill, RFKILL_STATE_SOFT_BLOCKED, 1);
mutex_unlock(&rfkill->mutex);
}
/**
* rfkill_allocate - allocate memory for rfkill structure.
* @parent: device that has rf switch on it
* @type: type of the switch (RFKILL_TYPE_*)
*
* This function should be called by the network driver when it needs
* rfkill structure. Once the structure is allocated the driver should
* finish its initialization by setting the name, private data, enable_radio
* and disable_radio methods and then register it with rfkill_register().
*
* NOTE: If registration fails the structure shoudl be freed by calling
* rfkill_free() otherwise rfkill_unregister() should be used.
*/
struct rfkill * __must_check rfkill_allocate(struct device *parent,
enum rfkill_type type)
{
struct rfkill *rfkill;
struct device *dev;
if (WARN((type >= RFKILL_TYPE_MAX),
KERN_WARNING
"rfkill: illegal type %d passed as parameter "
"to rfkill_allocate\n", type))
return NULL;
rfkill = kzalloc(sizeof(struct rfkill), GFP_KERNEL);
if (!rfkill)
return NULL;
mutex_init(&rfkill->mutex);
INIT_LIST_HEAD(&rfkill->node);
rfkill->type = type;
dev = &rfkill->dev;
dev->class = &rfkill_class;
dev->parent = parent;
device_initialize(dev);
__module_get(THIS_MODULE);
return rfkill;
}
EXPORT_SYMBOL(rfkill_allocate);
/**
* rfkill_free - Mark rfkill structure for deletion
* @rfkill: rfkill structure to be destroyed
*
* Decrements reference count of the rfkill structure so it is destroyed.
* Note that rfkill_free() should _not_ be called after rfkill_unregister().
*/
void rfkill_free(struct rfkill *rfkill)
{
if (rfkill)
put_device(&rfkill->dev);
}
EXPORT_SYMBOL(rfkill_free);
static void rfkill_led_trigger_register(struct rfkill *rfkill)
{
#ifdef CONFIG_RFKILL_LEDS
int error;
if (!rfkill->led_trigger.name)
rfkill->led_trigger.name = dev_name(&rfkill->dev);
if (!rfkill->led_trigger.activate)
rfkill->led_trigger.activate = rfkill_led_trigger_activate;
error = led_trigger_register(&rfkill->led_trigger);
if (error)
rfkill->led_trigger.name = NULL;
#endif /* CONFIG_RFKILL_LEDS */
}
static void rfkill_led_trigger_unregister(struct rfkill *rfkill)
{
#ifdef CONFIG_RFKILL_LEDS
if (rfkill->led_trigger.name) {
led_trigger_unregister(&rfkill->led_trigger);
rfkill->led_trigger.name = NULL;
}
#endif
}
/**
* rfkill_register - Register a rfkill structure.
* @rfkill: rfkill structure to be registered
*
* This function should be called by the network driver when the rfkill
* structure needs to be registered. Immediately from registration the
* switch driver should be able to service calls to toggle_radio.
*/
int __must_check rfkill_register(struct rfkill *rfkill)
{
static atomic_t rfkill_no = ATOMIC_INIT(0);
struct device *dev = &rfkill->dev;
int error;
if (WARN((!rfkill || !rfkill->toggle_radio ||
rfkill->type >= RFKILL_TYPE_MAX ||
rfkill->state >= RFKILL_STATE_MAX),
KERN_WARNING
"rfkill: attempt to register a "
"badly initialized rfkill struct\n"))
return -EINVAL;
dev_set_name(dev, "rfkill%ld", (long)atomic_inc_return(&rfkill_no) - 1);
rfkill_led_trigger_register(rfkill);
error = rfkill_add_switch(rfkill);
if (error) {
rfkill_led_trigger_unregister(rfkill);
return error;
}
error = device_add(dev);
if (error) {
rfkill_remove_switch(rfkill);
rfkill_led_trigger_unregister(rfkill);
return error;
}
return 0;
}
EXPORT_SYMBOL(rfkill_register);
/**
* rfkill_unregister - Unregister a rfkill structure.
* @rfkill: rfkill structure to be unregistered
*
* This function should be called by the network driver during device
* teardown to destroy rfkill structure. Note that rfkill_free() should
* _not_ be called after rfkill_unregister().
*/
void rfkill_unregister(struct rfkill *rfkill)
{
BUG_ON(!rfkill);
device_del(&rfkill->dev);
rfkill_remove_switch(rfkill);
rfkill_led_trigger_unregister(rfkill);
put_device(&rfkill->dev);
}
EXPORT_SYMBOL(rfkill_unregister);
/**
* rfkill_set_default - set initial value for a switch type
* @type - the type of switch to set the default state of
* @state - the new default state for that group of switches
*
* Sets the initial state rfkill should use for a given type.
* The following initial states are allowed: RFKILL_STATE_SOFT_BLOCKED
* and RFKILL_STATE_UNBLOCKED.
*
* This function is meant to be used by platform drivers for platforms
* that can save switch state across power down/reboot.
*
* The default state for each switch type can be changed exactly once.
* After a switch of that type is registered, the default state cannot
* be changed anymore. This guards against multiple drivers it the
* same platform trying to set the initial switch default state, which
* is not allowed.
*
* Returns -EPERM if the state has already been set once or is in use,
* so drivers likely want to either ignore or at most printk(KERN_NOTICE)
* if this function returns -EPERM.
*
* Returns 0 if the new default state was set, or an error if it
* could not be set.
*/
int rfkill_set_default(enum rfkill_type type, enum rfkill_state state)
{
int error;
if (WARN((type >= RFKILL_TYPE_MAX ||
(state != RFKILL_STATE_SOFT_BLOCKED &&
state != RFKILL_STATE_UNBLOCKED)),
KERN_WARNING
"rfkill: illegal state %d or type %d passed as "
"parameter to rfkill_set_default\n", state, type))
return -EINVAL;
mutex_lock(&rfkill_global_mutex);
if (!test_and_set_bit(type, rfkill_states_lockdflt)) {
rfkill_global_states[type].default_state = state;
rfkill_global_states[type].current_state = state;
error = 0;
} else
error = -EPERM;
mutex_unlock(&rfkill_global_mutex);
return error;
}
EXPORT_SYMBOL_GPL(rfkill_set_default);
/*
* Rfkill module initialization/deinitialization.
*/
static int __init rfkill_init(void)
{
int error;
int i;
/* RFKILL_STATE_HARD_BLOCKED is illegal here... */
if (rfkill_default_state != RFKILL_STATE_SOFT_BLOCKED &&
rfkill_default_state != RFKILL_STATE_UNBLOCKED)
return -EINVAL;
for (i = 0; i < RFKILL_TYPE_MAX; i++)
rfkill_global_states[i].default_state = rfkill_default_state;
error = class_register(&rfkill_class);
if (error) {
printk(KERN_ERR "rfkill: unable to register rfkill class\n");
return error;
}
return 0;
}
static void __exit rfkill_exit(void)
{
class_unregister(&rfkill_class);
}
subsys_initcall(rfkill_init);
module_exit(rfkill_exit);

net/rfkill/rfkill.h (renamed from rfkill-input.h)

@@ -1,5 +1,6 @@
/*
* Copyright (C) 2007 Ivo van Doorn
* Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
*/
/*
@@ -11,11 +12,16 @@
#ifndef __RFKILL_INPUT_H
#define __RFKILL_INPUT_H
void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state);
/* core code */
void rfkill_switch_all(const enum rfkill_type type, bool blocked);
void rfkill_epo(void);
void rfkill_restore_states(void);
void rfkill_remove_epo_lock(void);
bool rfkill_is_epo_lock_active(void);
enum rfkill_state rfkill_get_global_state(const enum rfkill_type type);
bool rfkill_get_global_sw_state(const enum rfkill_type type);
/* input handler */
int rfkill_handler_init(void);
void rfkill_handler_exit(void);
#endif /* __RFKILL_INPUT_H */